column         dtype           values
repo_name      stringlengths   6 .. 112
path           stringlengths   4 .. 204
copies         stringlengths   1 .. 3
size           stringlengths   4 .. 7
content        stringlengths   711 .. 1.04M
license        stringclasses   15 values
hash           int64           -9,223,328,406,218,787,000 .. 9,223,331,109B
line_mean      float64         5.74 .. 99.7
line_max       int64           17 .. 1k
alpha_frac     float64         0.25 .. 0.96
autogenerated  bool            1 class
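A minimal sketch of how rows with this schema are typically streamed with the Hugging Face `datasets` library. The repository id below is a placeholder, since the actual dataset name is not given in this dump, and the library choice itself is an assumption.

import itertools
from datasets import load_dataset

# Placeholder dataset id -- replace with the real repository name.
ds = load_dataset("<owner>/<python-code-dataset>", split="train", streaming=True)

# Inspect a couple of rows without downloading the whole corpus.
for row in itertools.islice(ds, 2):
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))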
jdtogni/trader
tests/old/dual_moving_avg.py
1
1117
from zipline.api import order_target, record, symbol, history, add_history


def initialize(context):
    # Register 2 histories that track daily prices,
    # one with a 100 window and one with a 300 day window
    add_history(100, '1d', 'price')
    add_history(300, '1d', 'price')

    context.i = 0


def handle_data(context, data):
    # Skip first 300 days to get full windows
    context.i += 1
    if context.i < 300:
        return

    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(100, '1d', 'price').mean()
    long_mavg = history(300, '1d', 'price').mean()

    sym = symbol('AAPL')

    # Trading logic
    if short_mavg[sym] > long_mavg[sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)

    # Save values for later inspection
    record(AAPL=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])
apache-2.0
5,833,349,686,236,586,000
28.394737
74
0.63205
false
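The row above uses the old zipline API (add_history/history), which later releases removed. As a point of comparison, here is a minimal sketch of the same dual-moving-average logic written against the newer zipline 1.x data.history interface; it is an assumption about the newer API, not part of the dataset row, and is untested here.

from zipline.api import order_target, record, symbol


def initialize(context):
    context.i = 0
    context.asset = symbol('AAPL')


def handle_data(context, data):
    # Skip the first 300 bars so both windows are full.
    context.i += 1
    if context.i < 300:
        return

    # data.history returns a pandas Series for a single asset and field.
    short_mavg = data.history(context.asset, 'price', bar_count=100, frequency='1d').mean()
    long_mavg = data.history(context.asset, 'price', bar_count=300, frequency='1d').mean()

    if short_mavg > long_mavg:
        order_target(context.asset, 100)
    elif short_mavg < long_mavg:
        order_target(context.asset, 0)

    record(AAPL=data.current(context.asset, 'price'),
           short_mavg=short_mavg,
           long_mavg=long_mavg)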
hyperspy/hyperspy
hyperspy/drawing/mpl_he.py
1
8993
# -*- coding: utf-8 -*- # Copyright 2007-2021 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. from functools import partial import logging from traits.api import Undefined from hyperspy.drawing import widgets, signal1d, image from hyperspy.defaults_parser import preferences _logger = logging.getLogger(__name__) class MPL_HyperExplorer(object): """ """ def __init__(self): self.signal_data_function = None self.navigator_data_function = None # args to pass to `__call__` self.signal_data_function_kwargs = {} self.axes_manager = None self.signal_title = '' self.navigator_title = '' self.quantity_label = '' self.signal_plot = None self.navigator_plot = None self.axis = None self.pointer = None self._pointer_nav_dim = None def plot_signal(self, **kwargs): # This method should be implemented by the subclasses. # Doing nothing is good enough for signal_dimension==0 though. if self.axes_manager.signal_dimension == 0: return if self.signal_data_function_kwargs.get('fft_shift', False): self.axes_manager = self.axes_manager.deepcopy() for axis in self.axes_manager.signal_axes: axis.offset = -axis.high_value / 2. def plot_navigator(self, title=None, **kwargs): """ Parameters ---------- title : str, optional Title of the navigator. The default is None. **kwargs : dict The kwargs are passed to plot method of :py:meth:`hyperspy.drawing.image.ImagePlot` or :py:meth:`hyperspy.drawing.signal1d.Signal1DLine`. 
""" if self.axes_manager.navigation_dimension == 0: return if self.navigator_data_function is None: return if self.navigator_data_function == "slider": self._get_navigation_sliders() return title = title or self.signal_title + " Navigator" if self.signal_title else "" if len(self.navigator_data_function().shape) == 1: # Create the figure sf = signal1d.Signal1DFigure(title=title) axis = self.axes_manager.navigation_axes[0] sf.xlabel = '%s' % str(axis) if axis.units is not Undefined: sf.xlabel += ' (%s)' % axis.units sf.ylabel = r'$\Sigma\mathrm{data\,over\,all\,other\,axes}$' sf.axis = axis sf.axes_manager = self.axes_manager self.navigator_plot = sf # Create a line to the left axis with the default indices sl = signal1d.Signal1DLine() sl.data_function = self.navigator_data_function # Set all kwargs value to the image figure before passing the rest # of the kwargs to plot method of the image figure for key in list(kwargs.keys()): if hasattr(sl, key): setattr(sl, key, kwargs.pop(key)) sl.set_line_properties(color='blue', type='step' if axis.is_uniform else 'line') # Add the line to the figure sf.add_line(sl) sf.plot() self.pointer.set_mpl_ax(sf.ax) if self.axes_manager.navigation_dimension > 1: self._get_navigation_sliders() for axis in self.axes_manager.navigation_axes[:-2]: axis.events.index_changed.connect(sf.update, []) sf.events.closed.connect( partial(axis.events.index_changed.disconnect, sf.update), []) self.navigator_plot = sf elif len(self.navigator_data_function().shape) >= 2: # Create the figure imf = image.ImagePlot(title=title) imf.data_function = self.navigator_data_function # Set all kwargs value to the image figure before passing the rest # of the kwargs to plot method of the image figure for key, value in list(kwargs.items()): if hasattr(imf, key): setattr(imf, key, kwargs.pop(key)) # Navigator labels if self.axes_manager.navigation_dimension == 1: imf.yaxis = self.axes_manager.navigation_axes[0] imf.xaxis = self.axes_manager.signal_axes[0] elif self.axes_manager.navigation_dimension >= 2: imf.yaxis = self.axes_manager.navigation_axes[1] imf.xaxis = self.axes_manager.navigation_axes[0] if self.axes_manager.navigation_dimension > 2: self._get_navigation_sliders() for axis in self.axes_manager.navigation_axes[2:]: axis.events.index_changed.connect(imf.update, []) imf.events.closed.connect( partial(axis.events.index_changed.disconnect, imf.update), []) if "cmap" not in kwargs.keys() or kwargs['cmap'] is None: kwargs["cmap"] = preferences.Plot.cmap_navigator imf.plot(**kwargs) self.pointer.set_mpl_ax(imf.ax) self.navigator_plot = imf if self.navigator_plot is not None: self.navigator_plot.events.closed.connect( self._on_navigator_plot_closing, []) def _get_navigation_sliders(self): try: self.axes_manager.gui_navigation_sliders( title=self.signal_title + " navigation sliders") except (ValueError, ImportError) as e: _logger.warning("Navigation sliders not available. " + str(e)) def close_navigator_plot(self): if self.navigator_plot: self.navigator_plot.close() @property def is_active(self): """A plot is active when it has the figure open meaning that it has either one of 'signal_plot' or 'navigation_plot' is not None and it has a attribute 'figure' which is not None. 
""" if self.signal_plot and self.signal_plot.figure: return True elif self.navigator_plot and self.navigator_plot.figure: return True else: return False def plot(self, **kwargs): # Parse the kwargs for plotting complex data for key in ['power_spectrum', 'fft_shift']: if key in kwargs: self.signal_data_function_kwargs[key] = kwargs.pop(key) if self.pointer is None: pointer = self.assign_pointer() if pointer is not None: self.pointer = pointer(self.axes_manager) self.pointer.color = 'red' self.pointer.connect_navigate() self.plot_navigator(**kwargs.pop('navigator_kwds', {})) if pointer is not None: self.navigator_plot.events.closed.connect( self.pointer.disconnect, []) self.plot_signal(**kwargs) def assign_pointer(self): if self.navigator_data_function is None: nav_dim = 0 elif self.navigator_data_function == "slider": nav_dim = 0 else: nav_dim = len(self.navigator_data_function().shape) if nav_dim == 2: # It is an image if self.axes_manager.navigation_dimension > 1: Pointer = widgets.SquareWidget else: # It is the image of a "spectrum stack" Pointer = widgets.HorizontalLineWidget elif nav_dim == 1: # It is a spectrum Pointer = widgets.VerticalLineWidget else: Pointer = None self._pointer_nav_dim = nav_dim return Pointer def _on_navigator_plot_closing(self): self.navigator_plot = None def _on_signal_plot_closing(self): self.signal_plot = None def close(self): """When closing, we make sure: - close the matplotlib figure - drawing events are disconnected - the attribute 'signal_plot' and 'navigation_plot' are set to None """ if self.is_active: if self.signal_plot: self.signal_plot.close() self.close_navigator_plot()
gpl-3.0
-4,064,256,368,221,523,000
38.1
86
0.57845
false
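A short usage sketch of how the MPL_HyperExplorer plumbing in the row above is normally reached from user code. It assumes the public hyperspy.api entry point is available; s.plot() is what ends up driving plot_navigator/plot_signal, and navigator='slider' exercises the _get_navigation_sliders branch.

import numpy as np
import hyperspy.api as hs

# A 2D navigation space (10 x 10) with a 1D signal of 500 channels.
s = hs.signals.Signal1D(np.random.random((10, 10, 500)))

s.plot()                     # image navigator + pointer (the >=2-D navigator branch)
s.plot(navigator='slider')   # slider-only navigation instead of a navigator figure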
marinkaz/medusa
medusa/medusa_cpe.py
1
6699
# coding: utf-8 # Copyright (C) 2016 Marinka Zitnik <marinka@cs.stanford.edu> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from operator import itemgetter import logging import time import numpy as np from scipy.special import gammaln from scipy.integrate import quad __version__ = '0.1' __all__ = ['medusa'] _DEF_ALPHA = 0.5 _DEF_Q = 0.25 logging.basicConfig(level=logging.INFO) _log = logging.getLogger('MEDUSA') def _binom(n, k): """Continuous binomial coefficient. It provides the functional form necessary to interpolate Newton's generalized binomial coefficient.""" return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1) def _op(X): """Compute power of an object profile segment.""" score = np.sum(X) return score def _prob(nCkq, C, s0, k, kq, ns0, weights): """Compute probability for an observation under a null model.""" nCs0_q = _op(C[s0, :][:, kq] * weights[s0, None]) nCs0 = _op(C[s0[:ns0], :]) # at most how much can be the sum of nq smallest elements given # the current normalization scheme in which the sum of all elements # is equal to ndim? -> len(kq) # at least how much can the sum of nq largest elements given # the current normalization scheme in which the sum of all elements # is equal to ndim? -> len(kq) nCk = _op(C[k, :]) nCkq -= len(kq) nCs0_diff_ncs = nCs0 - nCs0_q nCkq_diff_nsc = nCk - nCkq score = _binom(nCs0_q, nCkq) + _binom(nCs0_diff_ncs, nCkq_diff_nsc) - _binom(nCs0, nCk) score = np.exp(score) return score def pvalue(nCkq, C, s0, k, kq, ns0, weights): """Compute p-value for an observation under a null model.""" pval, err_bound = quad(_prob, len(kq), nCkq, args=(C, s0, k, kq, ns0, weights)) return pval def medusa(C, s0, nk, alpha=_DEF_ALPHA, q=_DEF_Q, return_itr2scores=False): """ MEDUSA algorithm to estimate `nk`-maximally significant module to the pivots `s0`. Parameters ---------- C : ndarray Chained matrix relating objects E1 to objects E2. s0 : ndarray Indices of relevant objects E1. nk : int Desired size of the module. Used in submodular optimization. alpha : float, optional Concentration parameter promoting modules that are tight around original seed genes. Weight parameter to give higher weights to the seed objects compared to those that are agglomerated into the module at later iteration steps. Zero means seed objects are treated the same way as objects that were only predicted. One means predicted objects are not added to the seed set. 0.5 by default. q : float, optional Strength parameter. It defines the fraction of E2 objects whose associations are considered in computations. One means that associations with *all* E2 object are used. 0.25 by default. return_itr2scores : bool, optional Return a dict keyed by iterations that holds computed scores of the objects. False by default. Returns ------- S : ndarray Array of shape (nk,) holding objects in the module. P : ndarray Array of shape (nk,) holding p-values for objects in the module. 
exectimes : ndarray Execution times to expand the module in each iteration. itr2scores : dict, optional Dict mapping iterations to computed scores. Examples -------- >>> ndim = 40 >>> s0 = np.arange(5) >>> ns0 = len(s0) >>> a = 0.8*np.random.rand(ns0, ns0) >>> cov = np.dot(a, a.T) >>> C = np.random.rand(100, ndim) >>> C[s0] = np.random.multivariate_normal(np.zeros(ns0), cov, ndim).T >>> S, P, _ = medusa(C, s0, 10) """ # ------------- check input ---------------------------------------------- n, ndim = C.shape ns0 = len(s0) if not nk <= n: raise ValueError('Size of module must be less than the number of objects.') if any(s0 < 0) or any(s0 >= n): raise ValueError('Relevant objects E1 must be provided in C.') if q <= 0. or q > 1: raise ValueError('Strength parameter must be in (0,1) interval.') _log.debug( '[Config] C: %s | S0: %d | Nk: %7.1e | alpha: %7.1e' % (str(C.shape), ns0, nk, alpha) ) itr2scores = {} if return_itr2scores else None # ------- normalize C for the null model ---------------------------------- C = C / C.sum(axis=1)[:, None] * ndim C = np.nan_to_num(C) nq = int(q * ndim) # ------- compute module -------------------------------------------------- KQ = [(k, np.argsort(C[k])[-nq:]) for k in range(n) if k not in s0] fit = np.zeros(nk) weights = np.zeros(n) weights[s0] = 1. S, P = [], [] exectimes = [] for itr in range(nk): tic = time.time() pvalues = [ (k, pvalue(_op(C[k, kq]), C, s0, k, kq, ns0, weights)) for k, kq in KQ ] scores = [(k, np.exp(-pv), pv) for k, pv in pvalues] scores = sorted(scores, key=itemgetter(1), reverse=True) KQ = [(k, kq) for k, kq in KQ if k != scores[0][0]] S.append(scores[0][0]) P.append(scores[0][2]) if return_itr2scores: itr2scores[itr] = scores if scores[0][0] not in s0: weights[scores[0][0]] = (1. - alpha)**(itr + 1) s0 = np.r_[s0, scores[0][0]] fit[itr] = fit[itr-1] + scores[0][1] toc = time.time() exectimes.append(toc - tic) # _log.info('[%3d] fit: %0.5f | object: %d | p-value: %6.3e | ' # 'secs: %.5f | s_itr: %s' % ( # itr, fit[itr], S[-1], P[-1], exectimes[-1], ', '.join(map(str, s0)) # )) _log.info('[%3d] fit: %0.5f | object: %d | p-value: %6.3e | secs: %.5f' % ( itr, fit[itr], S[-1], P[-1], exectimes[-1] )) _log.info('[end] non-monotone: %d' % np.sum(np.diff(fit) < 0)) if not return_itr2scores: return np.array(S), np.array(P), np.array(exectimes) else: return np.array(S), np.array(P), np.array(exectimes), itr2scores
gpl-2.0
-2,919,836,493,607,939,000
32.833333
91
0.59173
false
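A small standalone check of the log-binomial trick used by _binom in the row above: for integer arguments, exponentiating the gammaln-based log coefficient recovers the ordinary binomial coefficient, while the gammaln form also extends to the continuous arguments the null model integrates over. This sketch is not part of the medusa module.

import numpy as np
from scipy.special import gammaln, comb

def log_binom(n, k):
    # Continuous binomial coefficient in log space, as in medusa._binom.
    return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)

print(np.exp(log_binom(10, 3)), comb(10, 3))   # ~120.0 vs 120.0
print(np.exp(log_binom(10.5, 3.2)))            # also defined for non-integer arguments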
nicocardiel/xmegara
traces2ds9.py
1
5296
from __future__ import division from __future__ import print_function import argparse from copy import deepcopy import json import numpy as np from numpy.polynomial import Polynomial def traces2ds9(json_file, ds9_file, rawimage, numpix=100, fibid_at=0, yoffset=0.0): """Transfor fiber traces from JSON to ds9-region format. Parameters ---------- json_file : str Input JSON file name. ds9_file : file Handle to output file name in ds9-region format. rawimage : bool If True the traces must be generated to be overplotted on raw FITS images. numpix : int Number of abscissae per fiber trace. fibid_at : int Abscissae where the fibid is shown (default=0 -> not shown). yoffset : float Vertical offset (in pixels). """ # offset between polynomial and image abscissae if rawimage: ix_offset = 51 else: ix_offset = 1 # open output file and insert header ds9_file.write('# Region file format: DS9 version 4.1\n') ds9_file.write('global color=green dashlist=2 4 width=2 ' 'font="helvetica 10 normal roman" select=1 ' 'highlite=1 dash=1 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n') ds9_file.write('physical\n') # read traces from JSON file and save region in ds9 file colorbox = ['#ff77ff', '#4444ff'] bigdict = json.loads(open(json_file).read()) insmode = bigdict['tags']['insmode'] ds9_file.write('#\n# insmode: {0}\n'.format(insmode)) vph = bigdict['tags']['vph'] ds9_file.write('# vph: {0}\n'.format(vph)) uuid = bigdict['uuid'] ds9_file.write('# uuid: {0}\n'.format(uuid)) # check for global_offset in JSON file if 'global_offset' in bigdict.keys(): global_offset_poly = np.polynomial.Polynomial(bigdict['global_offset']) ref_column = bigdict['ref_column'] else: global_offset_poly = np.polynomial.Polynomial([0.0]) ref_column = 2000 for fiberdict in bigdict['contents']: fibid = fiberdict['fibid'] boxid = fiberdict['boxid'] xmin = fiberdict['start'] xmax = fiberdict['stop'] coeff = np.array(fiberdict['fitparms']) ds9_file.write('#\n# fibid: {0}\n'.format(fibid)) # skip fibers without trace if len(coeff) > 0: xp = np.linspace(start=xmin, stop=xmax, num=numpix) ypol = Polynomial(coeff) yp = ypol(xp) if rawimage: lcut = (yp > 2056.5) yp[lcut] += 100 global_offset = global_offset_poly(ypol(ref_column)) for i in range(len(xp)-1): x1 = xp[i] + ix_offset y1 = yp[i] + 1 + yoffset + global_offset x2 = xp[i+1] + ix_offset y2 = yp[i+1] + 1 + yoffset + global_offset ds9_file.write('line {0} {1} {2} {3}'.format(x1, y1, x2, y2)) ds9_file.write(' # color={0}\n'.format(colorbox[boxid % 2])) if fibid_at != 0: if x1 <= fibid_at <= x2: ds9_file.write('text {0} {1} {{{2}}} # color=green ' 'font="helvetica 10 bold ' 'roman"\n'.format((x1+x2)/2, (y1+y2)/2, fibid)) else: print('Warning ---> Missing fiber:', fibid) def main(args=None): # parse command-line options parser = argparse.ArgumentParser(prog='traces2ds9') # positional parameters parser.add_argument("json_file", help="JSON file with fiber traces", type=argparse.FileType('r')) parser.add_argument("ds9_file", help="Output region file in ds9 format", type=argparse.FileType('w')) # optional parameters parser.add_argument("--numpix", help="Number of pixels/trace (default 100)", default=100, type=int) parser.add_argument("--yoffset", help="Vertical offset (+upwards, -downwards)", default=0, type=float) parser.add_argument("--new_json", help="New JSON file after applying specified yoffset", type=argparse.FileType('w')) parser.add_argument("--rawimage", help="FITS file is a RAW image (RSS assumed instead)", action="store_true") parser.add_argument("--fibid_at", help="Display fiber 
identification number at location", default=0, type=int) args = parser.parse_args(args=args) traces2ds9(args.json_file.name, args.ds9_file, args.rawimage, args.numpix, args.fibid_at, args.yoffset) if args.new_json is not None: bigdict = json.loads(args.json_file.read()) newdict = deepcopy(bigdict) for fiber in newdict['contents']: if len(fiber['fitparms']) > 0: fiber['fitparms'][0] += args.yoffset json.dump(newdict, args.new_json, indent=2) if __name__ == "__main__": main()
gpl-3.0
-6,811,131,174,366,681,000
36.295775
79
0.542674
false
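A sketch of the minimal JSON layout that traces2ds9() in the row above actually reads (tags, uuid, and contents entries with fibid/boxid/start/stop/fitparms), followed by a direct call. The numeric values and tag strings are made up, and the sketch assumes traces2ds9.py is importable from the working directory.

import json

from traces2ds9 import traces2ds9

tracemap = {
    "tags": {"insmode": "LCB", "vph": "LR-R"},
    "uuid": "00000000-0000-0000-0000-000000000000",
    "contents": [
        {"fibid": 1, "boxid": 1, "start": 4.0, "stop": 4092.0,
         "fitparms": [10.0, 0.001, 1.0e-7]},   # y(x) polynomial coefficients
        {"fibid": 2, "boxid": 1, "start": 4.0, "stop": 4092.0,
         "fitparms": []},                       # empty -> reported as a missing fiber
    ],
}

with open("traces.json", "w") as f:
    json.dump(tracemap, f)

with open("traces.reg", "w") as ds9_file:
    traces2ds9("traces.json", ds9_file, rawimage=False, numpix=100, fibid_at=2000)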
loli/sklearn-ensembletrees
sklearn/decomposition/truncated_svd.py
1
8226
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck <L.J.Buitinck@uva.nl> # Olivier Grisel <olivier.grisel@ensta.org> # Michael Becker <mike@beckerfuffle.com> # License: 3-clause BSD. import warnings import numpy as np import scipy.sparse as sp try: from scipy.sparse.linalg import svds except ImportError: from ..utils.arpack import svds from ..base import BaseEstimator, TransformerMixin from ..utils import (array2d, as_float_array, atleast2d_or_csr, check_random_state) from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis0 __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). It is very similar to PCA, but operates on sample vectors directly, instead of on a covariance matrix. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithm: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional Number of iterations for randomized SVD solver. Not used by ARPACK. random_state : int or RandomState, optional (Seed for) pseudo-random number generator. If not given, the numpy.random singleton is used. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- `components_` : array, shape (n_components, n_features) `explained_variance_` : array, [n_components] The variance of the training samples transformed by a projection to each component. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...] >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS 0.27930... See also -------- PCA RandomizedPCA References ---------- Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 Notes ----- SVD suffers from a problem called "sign indeterminancy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. 
To work around this, fit instances of this class to data once, then keep the instance around to do transformations. """ def __init__(self, n_components=2, algorithm="randomized", n_iter=5, random_state=None, tol=0., n_iterations=None): if n_iterations is not None: warnings.warn("n_iterations was renamed to n_iter for consistency " "and will be removed in 0.16.", DeprecationWarning) n_iter = n_iterations self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit LSI model on training data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self def fit_transform(self, X, y=None): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = as_float_array(X, copy=False) random_state = check_random_state(self.random_state) # If sparse and not csr or csc, convert to csr if sp.issparse(X) and X.getformat() not in ["csr", "csc"]: X = X.tocsr() if self.algorithm == "arpack": U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] U, VT = svd_flip(U[:, ::-1], VT[::-1]) elif self.algorithm == "randomized": k = self.n_components n_features = X.shape[1] if k >= n_features: raise ValueError("n_components must be < n_features;" " got %d >= %d" % (k, n_features)) U, Sigma, VT = randomized_svd(X, self.n_components, n_iter=self.n_iter, random_state=random_state) else: raise ValueError("unknown algorithm %r" % self.algorithm) self.components_ = VT # Calculate explained variance & explained variance ratio n_samples = X.shape[0] X_transformed = np.dot(U, np.diag(Sigma)) self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis0(X) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = atleast2d_or_csr(X) return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data. Returns ------- X_original : array, shape (n_samples, n_features) Note that this is always a dense array. """ X = array2d(X) return np.dot(X, self.components_) @property def n_iterations(self): warnings.warn("n_iterations was renamed to n_iter for consistency " "and will be removed in 0.16.", DeprecationWarning) return self.n_iter
bsd-3-clause
3,497,923,530,653,270,500
35.237885
79
0.609166
false
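A usage sketch of the LSA setting that the TruncatedSVD docstring above describes: reduce a tf-idf term matrix and L2-normalize the result. It assumes a stock scikit-learn install; the corpus is a throwaway example.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline

corpus = [
    "the cat sat on the mat",
    "the dog sat on the log",
    "cats and dogs are pets",
    "matrices can be decomposed",
]

X = TfidfVectorizer().fit_transform(corpus)           # sparse (n_docs, n_terms)

lsa = make_pipeline(TruncatedSVD(n_components=2, random_state=42),
                    Normalizer(copy=False))
X_lsa = lsa.fit_transform(X)                          # dense (n_docs, 2)
print(X_lsa.shape)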
deeplearning4j/deeplearning4j
jumpy/jumpy/ops/linalg.py
2
1319
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################


from .op import op
from ..java_classes import *

# Linear algebra
# https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.linalg.html


@op
def dot(arr, other):
    return arr.mmul(other)


@op
def tensordot(arr1, arr2, axes=2):
    shape1 = arr1.shape()
    shape2 = arr2.shape()
    if type(axes) is int:
        axes = [shape1[axes:], shape2[:axes]]
    elif type(axes) in [list, tuple]:
        axes = list(axes)
        for i in range(2):
            if type(axes[i]) is int:
                axes[i] = [axes[i]]
    return Nd4j.tensorMmul(arr1, arr2, axes)
apache-2.0
7,937,741,037,545,020,000
30.404762
80
0.601971
false
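A plain-NumPy illustration of the tensordot convention that the jumpy op above mirrors: an integer axes=N means "contract the last N axes of the first array with the first N axes of the second", which is equivalent to spelling the axis indices out explicitly. Standalone sketch, no nd4j required.

import numpy as np

a = np.random.rand(3, 4, 5)
b = np.random.rand(4, 5, 6)

c1 = np.tensordot(a, b, axes=2)                    # contract last 2 axes of a with first 2 of b
c2 = np.tensordot(a, b, axes=([1, 2], [0, 1]))     # the same contraction, spelled out

print(c1.shape, np.allclose(c1, c2))               # (3, 6) True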
BrainTech/openbci
obci/interfaces/hybrid/p300etr/bk/p300etr_decision_peer.py
2
2170
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from multiplexer.multiplexer_constants import peers, types
from obci.control.peer.configured_multiplexer_server import ConfiguredMultiplexerServer
from obci.configs import settings, variables_pb2

import random, time, sys

import numpy as np
import scipy.stats as st  # needed by st.t.cdf below; missing from the original file

from obci.interfaces import interfaces_logging as logger

LOGGER = logger.get_logger("p300_etr_decision", "debug")


class P300EtrDecision(ConfiguredMultiplexerServer):
    def __init__(self, addresses):
        super(P300EtrDecision, self).__init__(addresses=addresses,
                                              type=peers.RESULTS_ANALYSIS)
        self.ready()

    def initConst(self):
        self.fields = 8
        self.tresholdValue = 0.95

    def handle_message(self, mxmsg):
        LOGGER.info("P300EtrDecision\n")
        if mxmsg.type == types.ETR_ANALYSIS_RESULTS:
            res = variables_pb2.Sample()
            res.ParseFromString(mxmsg.message)
            LOGGER.debug("GOT ETR ANALYSIS RESULTS: " + str(res.channels))
            #self.conn.send_message(message = str(dec), type = types.DECISION_MESSAGE, flush=True)

        elif mxmsg.type == types.P300_ANALYSIS_RESULTS:
            res = variables_pb2.Sample()
            res.ParseFromString(mxmsg.message)
            LOGGER.debug("GOT P300 ANALYSIS RESULTS: " + str(res.channels))
            #self.conn.send_message(message = str(dec), type = types.DECISION_MESSAGE, flush=True)

            # Probability from etr
            pdf_etr = np.random.random(8)

            # Probability from p300
            pdf_p300 = np.random.random(8)

            # Hybrid probability
            pdf = pdf_p300 * pdf_etr

            # Assume pdf is T distribution
            loc = pdf.mean()
            scale = pdf.std()
            cdf = st.t.cdf(pdf, len(pdf), loc=loc, scale=scale)

            # If only one value is over threshold
            if np.sum(cdf > self.tresholdValue) == 1:
                dec = int(np.arange(len(cdf))[cdf > self.tresholdValue])
                SEND_DECISION(dec)  # placeholder in this backup file; no such helper is defined here

        self.no_response()


if __name__ == "__main__":
    P300EtrDecision(settings.MULTIPLEXER_ADDRESSES).loop()
gpl-3.0
-925,453,039,939,275,100
32.90625
98
0.614747
false
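The hybrid decision rule from handle_message above, pulled out as a standalone function so it can be exercised without the multiplexer plumbing. Sketch only; the peer in the row would feed real ETR/P300 scores instead of the random placeholders used here.

import numpy as np
import scipy.stats as st

def hybrid_decision(pdf_etr, pdf_p300, threshold=0.95):
    pdf = pdf_p300 * pdf_etr                      # combine the two evidence vectors
    cdf = st.t.cdf(pdf, len(pdf), loc=pdf.mean(), scale=pdf.std())
    over = cdf > threshold
    if over.sum() == 1:                           # decide only when exactly one field stands out
        return int(np.flatnonzero(over)[0])
    return None                                   # otherwise keep waiting for more evidence

print(hybrid_decision(np.random.random(8), np.random.random(8)))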
mick-d/nipype
nipype/interfaces/base.py
1
72675
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains interfaces for using existing functionality in other packages Exaples FSL, matlab/SPM , afni Requires Packages to be installed """ from __future__ import print_function, division, unicode_literals, absolute_import from future import standard_library standard_library.install_aliases() from builtins import range, object, open, str, bytes from configparser import NoOptionError from copy import deepcopy import datetime from datetime import datetime as dt import errno import locale import os import re import platform from string import Template import select import subprocess as sp import sys import time from textwrap import wrap from warnings import warn import simplejson as json from dateutil.parser import parse as parseutc from packaging.version import Version import collections from .. import config, logging, LooseVersion, __version__ from ..utils.provenance import write_provenance from ..utils.misc import is_container, trim, str2bool from ..utils.filemanip import (md5, hash_infile, FileNotFoundError, hash_timestamp, split_filename, to_str, read_stream) from .traits_extension import ( traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, File, Directory, DictStrStr, has_metadata, ImageFile) from ..external.due import due nipype_version = Version(__version__) iflogger = logging.getLogger('interface') FLOAT_FORMAT = '{:.10f}'.format PY35 = sys.version_info >= (3, 5) PY3 = sys.version_info[0] > 2 VALID_TERMINAL_OUTPUT = ['stream', 'allatonce', 'file', 'file_split', 'file_stdout', 'file_stderr', 'none'] __docformat__ = 'restructuredtext' class Str(traits.Unicode): """Replacement for the default traits.Str based in bytes""" traits.Str = Str class NipypeInterfaceError(Exception): """Custom error for interfaces""" def __init__(self, value): self.value = value def __str__(self): return '{}'.format(self.value) def _exists_in_path(cmd, environ): """ Based on a code snippet from http://orip.org/2009/08/python-checking-if-executable-exists-in.html """ if 'PATH' in environ: input_environ = environ.get("PATH") else: input_environ = os.environ.get("PATH", "") extensions = os.environ.get("PATHEXT", "").split(os.pathsep) for directory in input_environ.split(os.pathsep): base = os.path.join(directory, cmd) options = [base] + [(base + ext) for ext in extensions] for filename in options: if os.path.exists(filename): return True, filename return False, None def load_template(name): """Load a template from the script_templates directory Parameters ---------- name : str The name of the file to load Returns ------- template : string.Template """ full_fname = os.path.join(os.path.dirname(__file__), 'script_templates', name) template_file = open(full_fname) template = Template(template_file.read()) template_file.close() return template class Bunch(object): """Dictionary-like class that provides attribute-style access to it's items. A `Bunch` is a simple container that stores it's items as class attributes. Internally all items are stored in a dictionary and the class exposes several of the dictionary methods. 
Examples -------- >>> from nipype.interfaces.base import Bunch >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True) >>> inputs # doctest: +ALLOW_UNICODE Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True) >>> inputs.register_to_mean = False >>> inputs # doctest: +ALLOW_UNICODE Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False) Notes ----- The Bunch pattern came from the Python Cookbook: .. [1] A. Martelli, D. Hudgeon, "Collecting a Bunch of Named Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005. """ def __init__(self, *args, **kwargs): self.__dict__.update(*args, **kwargs) def update(self, *args, **kwargs): """update existing attribute, or create new attribute Note: update is very much like HasTraits.set""" self.__dict__.update(*args, **kwargs) def items(self): """iterates over bunch attributes as key, value pairs""" return list(self.__dict__.items()) def iteritems(self): """iterates over bunch attributes as key, value pairs""" warn('iteritems is deprecated, use items instead') return list(self.items()) def get(self, *args): """Support dictionary get() functionality """ return self.__dict__.get(*args) def set(self, **kwargs): """Support dictionary get() functionality """ return self.__dict__.update(**kwargs) def dictcopy(self): """returns a deep copy of existing Bunch as a dictionary""" return deepcopy(self.__dict__) def __repr__(self): """representation of the sorted Bunch as a string Currently, this string representation of the `inputs` Bunch of interfaces is hashed to determine if the process' dirty-bit needs setting or not. Till that mechanism changes, only alter this after careful consideration. """ outstr = ['Bunch('] first = True for k, v in sorted(self.items()): if not first: outstr.append(', ') if isinstance(v, dict): pairs = [] for key, value in sorted(v.items()): pairs.append("'%s': %s" % (key, value)) v = '{' + ', '.join(pairs) + '}' outstr.append('%s=%s' % (k, v)) else: outstr.append('%s=%r' % (k, v)) first = False outstr.append(')') return ''.join(outstr) def _hash_infile(self, adict, key): # Inject file hashes into adict[key] stuff = adict[key] if not is_container(stuff): stuff = [stuff] file_list = [] for afile in stuff: if os.path.isfile(afile): md5obj = md5() with open(afile, 'rb') as fp: while True: data = fp.read(8192) if not data: break md5obj.update(data) md5hex = md5obj.hexdigest() else: md5hex = None file_list.append((afile, md5hex)) return file_list def _get_bunch_hash(self): """Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns ------- dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. hashvalue : str The md5 hash value of the `dict_withhash` """ infile_list = [] for key, val in list(self.items()): if is_container(val): # XXX - SG this probably doesn't catch numpy arrays # containing embedded file names either. if isinstance(val, dict): # XXX - SG should traverse dicts, but ignoring for now item = None else: if len(val) == 0: raise AttributeError('%s attribute is empty' % key) item = val[0] else: item = val try: if isinstance(item, str) and os.path.isfile(item): infile_list.append(key) except TypeError: # `item` is not a file or string. 
continue dict_withhash = self.dictcopy() dict_nofilename = self.dictcopy() for item in infile_list: dict_withhash[item] = self._hash_infile(dict_withhash, item) dict_nofilename[item] = [val[1] for val in dict_withhash[item]] # Sort the items of the dictionary, before hashing the string # representation so we get a predictable order of the # dictionary. sorted_dict = to_str(sorted(dict_nofilename.items())) return dict_withhash, md5(sorted_dict.encode()).hexdigest() def __pretty__(self, p, cycle): """Support for the pretty module pretty is included in ipython.externals for ipython > 0.10""" if cycle: p.text('Bunch(...)') else: p.begin_group(6, 'Bunch(') first = True for k, v in sorted(self.items()): if not first: p.text(',') p.breakable() p.text(k + '=') p.pretty(v) first = False p.end_group(6, ')') class InterfaceResult(object): """Object that contains the results of running a particular Interface. Attributes ---------- version : version of this Interface result object (a readonly property) interface : class type A copy of the `Interface` class that was run to generate this result. inputs : a traits free representation of the inputs outputs : Bunch An `Interface` specific Bunch that contains all possible files that are generated by the interface. The `outputs` are used as the `inputs` to another node when interfaces are used in the pipeline. runtime : Bunch Contains attributes that describe the runtime environment when the `Interface` was run. Contains the attributes: * cmdline : The command line string that was executed * cwd : The directory the ``cmdline`` was executed in. * stdout : The output of running the ``cmdline``. * stderr : Any error messages output from running ``cmdline``. * returncode : The code returned from running the ``cmdline``. """ def __init__(self, interface, runtime, inputs=None, outputs=None, provenance=None): self._version = 2.0 self.interface = interface self.runtime = runtime self.inputs = inputs self.outputs = outputs self.provenance = provenance @property def version(self): return self._version class BaseTraitedSpec(traits.HasTraits): """Provide a few methods necessary to support nipype interface api The inputs attribute of interfaces call certain methods that are not available in traits.HasTraits. These are provided here. new metadata: * usedefault : set this to True if the default value of the trait should be used. Unless this is set, the attributes are set to traits.Undefined new attribute: * get_hashval : returns a tuple containing the state of the trait as a dict and hashvalue corresponding to dict. XXX Reconsider this in the long run, but it seems like the best solution to move forward on the refactoring. """ package_version = nipype_version def __init__(self, **kwargs): """ Initialize handlers and inputs""" # NOTE: In python 2.6, object.__init__ no longer accepts input # arguments. HasTraits does not define an __init__ and # therefore these args were being ignored. 
# super(TraitedSpec, self).__init__(*args, **kwargs) super(BaseTraitedSpec, self).__init__(**kwargs) traits.push_exception_handler(reraise_exceptions=True) undefined_traits = {} for trait in self.copyable_trait_names(): if not self.traits()[trait].usedefault: undefined_traits[trait] = Undefined self.trait_set(trait_change_notify=False, **undefined_traits) self._generate_handlers() self.trait_set(**kwargs) def items(self): """ Name, trait generator for user modifiable traits """ for name in sorted(self.copyable_trait_names()): yield name, self.traits()[name] def __repr__(self): """ Return a well-formatted representation of the traits """ outstr = [] for name, value in sorted(self.trait_get().items()): outstr.append('%s = %s' % (name, value)) return '\n{}\n'.format('\n'.join(outstr)) def _generate_handlers(self): """Find all traits with the 'xor' metadata and attach an event handler to them. """ has_xor = dict(xor=lambda t: t is not None) xors = self.trait_names(**has_xor) for elem in xors: self.on_trait_change(self._xor_warn, elem) has_deprecation = dict(deprecated=lambda t: t is not None) deprecated = self.trait_names(**has_deprecation) for elem in deprecated: self.on_trait_change(self._deprecated_warn, elem) def _xor_warn(self, obj, name, old, new): """ Generates warnings for xor traits """ if isdefined(new): trait_spec = self.traits()[name] # for each xor, set to default_value for trait_name in trait_spec.xor: if trait_name == name: # skip ourself continue if isdefined(getattr(self, trait_name)): self.trait_set(trait_change_notify=False, **{'%s' % name: Undefined}) msg = ('Input "%s" is mutually exclusive with input "%s", ' 'which is already set') % (name, trait_name) raise IOError(msg) def _requires_warn(self, obj, name, old, new): """Part of the xor behavior """ if isdefined(new): trait_spec = self.traits()[name] msg = None for trait_name in trait_spec.requires: if not isdefined(getattr(self, trait_name)): if not msg: msg = 'Input %s requires inputs: %s' \ % (name, ', '.join(trait_spec.requires)) if msg: # only one requires warning at a time. warn(msg) def _deprecated_warn(self, obj, name, old, new): """Checks if a user assigns a value to a deprecated trait """ if isdefined(new): trait_spec = self.traits()[name] msg1 = ('Input %s in interface %s is deprecated.' % (name, self.__class__.__name__.split('InputSpec')[0])) msg2 = ('Will be removed or raise an error as of release %s' % trait_spec.deprecated) if trait_spec.new_name: if trait_spec.new_name not in self.copyable_trait_names(): raise TraitError(msg1 + ' Replacement trait %s not found' % trait_spec.new_name) msg3 = 'It has been replaced by %s.' % trait_spec.new_name else: msg3 = '' msg = ' '.join((msg1, msg2, msg3)) if Version(str(trait_spec.deprecated)) < self.package_version: raise TraitError(msg) else: if trait_spec.new_name: msg += 'Unsetting old value %s; setting new value %s.' 
% ( name, trait_spec.new_name) warn(msg) if trait_spec.new_name: self.trait_set(trait_change_notify=False, **{'%s' % name: Undefined, '%s' % trait_spec.new_name: new}) def _hash_infile(self, adict, key): """ Inject file hashes into adict[key]""" stuff = adict[key] if not is_container(stuff): stuff = [stuff] file_list = [] for afile in stuff: if is_container(afile): hashlist = self._hash_infile({'infiles': afile}, 'infiles') hash = [val[1] for val in hashlist] else: if config.get('execution', 'hash_method').lower() == 'timestamp': hash = hash_timestamp(afile) elif config.get('execution', 'hash_method').lower() == 'content': hash = hash_infile(afile) else: raise Exception("Unknown hash method: %s" % config.get('execution', 'hash_method')) file_list.append((afile, hash)) return file_list def get(self, **kwargs): """ Returns traited class as a dict Augments the trait get function to return a dictionary without notification handles """ out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, Undefined) return out def get_traitsfree(self, **kwargs): """ Returns traited class as a dict Augments the trait get function to return a dictionary without any traits. The dictionary does not contain any attributes that were Undefined """ out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, skipundefined=True) return out def _clean_container(self, object, undefinedval=None, skipundefined=False): """Convert a traited obejct into a pure python representation. """ if isinstance(object, TraitDictObject) or isinstance(object, dict): out = {} for key, val in list(object.items()): if isdefined(val): out[key] = self._clean_container(val, undefinedval) else: if not skipundefined: out[key] = undefinedval elif (isinstance(object, TraitListObject) or isinstance(object, list) or isinstance(object, tuple)): out = [] for val in object: if isdefined(val): out.append(self._clean_container(val, undefinedval)) else: if not skipundefined: out.append(undefinedval) else: out.append(None) if isinstance(object, tuple): out = tuple(out) else: if isdefined(object): out = object else: if not skipundefined: out = undefinedval return out def has_metadata(self, name, metadata, value=None, recursive=True): """ Return has_metadata for the requested trait name in this interface """ return has_metadata(self.trait(name).trait_type, metadata, value, recursive) def get_hashval(self, hash_method=None): """Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns ------- dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. 
hashvalue : str The md5 hash value of the traited spec """ dict_withhash = [] dict_nofilename = [] for name, val in sorted(self.get().items()): if not isdefined(val) or self.has_metadata(name, "nohash", True): # skip undefined traits and traits with nohash=True continue hash_files = (not self.has_metadata(name, "hash_files", False) and not self.has_metadata(name, "name_source")) dict_nofilename.append((name, self._get_sorteddict(val, hash_method=hash_method, hash_files=hash_files))) dict_withhash.append((name, self._get_sorteddict(val, True, hash_method=hash_method, hash_files=hash_files))) return dict_withhash, md5(to_str(dict_nofilename).encode()).hexdigest() def _get_sorteddict(self, objekt, dictwithhash=False, hash_method=None, hash_files=True): if isinstance(objekt, dict): out = [] for key, val in sorted(objekt.items()): if isdefined(val): out.append((key, self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files))) elif isinstance(objekt, (list, tuple)): out = [] for val in objekt: if isdefined(val): out.append(self._get_sorteddict(val, dictwithhash, hash_method=hash_method, hash_files=hash_files)) if isinstance(objekt, tuple): out = tuple(out) else: if isdefined(objekt): if (hash_files and isinstance(objekt, (str, bytes)) and os.path.isfile(objekt)): if hash_method is None: hash_method = config.get('execution', 'hash_method') if hash_method.lower() == 'timestamp': hash = hash_timestamp(objekt) elif hash_method.lower() == 'content': hash = hash_infile(objekt) else: raise Exception("Unknown hash method: %s" % hash_method) if dictwithhash: out = (objekt, hash) else: out = hash elif isinstance(objekt, float): out = FLOAT_FORMAT(objekt) else: out = objekt return out class DynamicTraitedSpec(BaseTraitedSpec): """ A subclass to handle dynamic traits This class is a workaround for add_traits and clone_traits not functioning well together. """ def __deepcopy__(self, memo): """ bug in deepcopy for HasTraits results in weird cloning behavior for added traits """ id_self = id(self) if id_self in memo: return memo[id_self] dup_dict = deepcopy(self.get(), memo) # access all keys for key in self.copyable_trait_names(): if key in self.__dict__.keys(): _ = getattr(self, key) # clone once dup = self.clone_traits(memo=memo) for key in self.copyable_trait_names(): try: _ = getattr(dup, key) except: pass # clone twice dup = self.clone_traits(memo=memo) dup.trait_set(**dup_dict) return dup class TraitedSpec(BaseTraitedSpec): """ Create a subclass with strict traits. This is used in 90% of the cases. """ _ = traits.Disallow class Interface(object): """This is an abstract definition for Interface objects. It provides no functionality. It defines the necessary attributes and methods all Interface objects should have. """ input_spec = None # A traited input specification output_spec = None # A traited output specification # defines if the interface can reuse partial results after interruption _can_resume = False @property def can_resume(self): return self._can_resume # should the interface be always run even if the inputs were not changed? 
_always_run = False @property def always_run(self): return self._always_run def __init__(self, **inputs): """Initialize command with given args and inputs.""" raise NotImplementedError @classmethod def help(cls): """ Prints class help""" raise NotImplementedError @classmethod def _inputs_help(cls): """ Prints inputs help""" raise NotImplementedError @classmethod def _outputs_help(cls): """ Prints outputs help""" raise NotImplementedError @classmethod def _outputs(cls): """ Initializes outputs""" raise NotImplementedError @property def version(self): raise NotImplementedError def run(self): """Execute the command.""" raise NotImplementedError def aggregate_outputs(self, runtime=None, needed_outputs=None): """Called to populate outputs""" raise NotImplementedError def _list_outputs(self): """ List expected outputs""" raise NotImplementedError def _get_filecopy_info(self): """ Provides information about file inputs to copy or link to cwd. Necessary for pipeline operation """ raise NotImplementedError class BaseInterfaceInputSpec(TraitedSpec): ignore_exception = traits.Bool(False, usedefault=True, nohash=True, desc='Print an error message instead of throwing an exception ' 'in case the interface fails to run') class BaseInterface(Interface): """Implements common interface functionality. Implements ---------- * Initializes inputs/outputs from input_spec/output_spec * Provides help based on input_spec and output_spec * Checks for mandatory inputs before running an interface * Runs an interface and returns results * Determines which inputs should be copied or linked to cwd This class does not implement aggregate_outputs, input_spec or output_spec. These should be defined by derived classes. This class cannot be instantiated. Relevant Interface attributes ----------------------------- ``input_spec`` points to the traited class for the inputs ``output_spec`` points to the traited class for the outputs ``_redirect_x`` should be set to ``True`` when the interface requires connecting to a ``$DISPLAY`` (default is ``False``). ``resource_monitor`` if ``False`` prevents resource-monitoring this interface, if ``True`` monitoring will be enabled IFF the general Nipype config is set on (``resource_monitor = true``). """ input_spec = BaseInterfaceInputSpec _version = None _additional_metadata = [] _redirect_x = False references_ = [] resource_monitor = True # Enabled for this interface IFF enabled in the config def __init__(self, from_file=None, resource_monitor=None, **inputs): if not self.input_spec: raise Exception('No input_spec in class: %s' % self.__class__.__name__) self.inputs = self.input_spec(**inputs) if resource_monitor is not None: self.resource_monitor = resource_monitor if from_file is not None: self.load_inputs_from_json(from_file, overwrite=True) for name, value in list(inputs.items()): setattr(self.inputs, name, value) @classmethod def help(cls, returnhelp=False): """ Prints class help """ if cls.__doc__: # docstring = cls.__doc__.split('\n') # docstring = [trim(line, '') for line in docstring] docstring = trim(cls.__doc__).split('\n') + [''] else: docstring = [''] allhelp = '\n'.join(docstring + cls._inputs_help() + [''] + cls._outputs_help() + [''] + cls._refs_help() + ['']) if returnhelp: return allhelp else: print(allhelp) @classmethod def _refs_help(cls): """ Prints interface references. 
""" if not cls.references_: return [] helpstr = ['References::'] for r in cls.references_: helpstr += ['{}'.format(r['entry'])] return helpstr @classmethod def _get_trait_desc(self, inputs, name, spec): desc = spec.desc xor = spec.xor requires = spec.requires argstr = spec.argstr manhelpstr = ['\t%s' % name] type_info = spec.full_info(inputs, name, None) default = '' if spec.usedefault: default = ', nipype default value: %s' % str(spec.default_value()[1]) line = "(%s%s)" % (type_info, default) manhelpstr = wrap(line, 70, initial_indent=manhelpstr[0] + ': ', subsequent_indent='\t\t ') if desc: for line in desc.split('\n'): line = re.sub("\s+", " ", line) manhelpstr += wrap(line, 70, initial_indent='\t\t', subsequent_indent='\t\t') if argstr: pos = spec.position if pos is not None: manhelpstr += wrap('flag: %s, position: %s' % (argstr, pos), 70, initial_indent='\t\t', subsequent_indent='\t\t') else: manhelpstr += wrap('flag: %s' % argstr, 70, initial_indent='\t\t', subsequent_indent='\t\t') if xor: line = '%s' % ', '.join(xor) manhelpstr += wrap(line, 70, initial_indent='\t\tmutually_exclusive: ', subsequent_indent='\t\t ') if requires: others = [field for field in requires if field != name] line = '%s' % ', '.join(others) manhelpstr += wrap(line, 70, initial_indent='\t\trequires: ', subsequent_indent='\t\t ') return manhelpstr @classmethod def _inputs_help(cls): """ Prints description for input parameters """ helpstr = ['Inputs::'] inputs = cls.input_spec() if len(list(inputs.traits(transient=None).items())) == 0: helpstr += ['', '\tNone'] return helpstr manhelpstr = ['', '\t[Mandatory]'] mandatory_items = inputs.traits(mandatory=True) for name, spec in sorted(mandatory_items.items()): manhelpstr += cls._get_trait_desc(inputs, name, spec) opthelpstr = ['', '\t[Optional]'] for name, spec in sorted(inputs.traits(transient=None).items()): if name in mandatory_items: continue opthelpstr += cls._get_trait_desc(inputs, name, spec) if manhelpstr: helpstr += manhelpstr if opthelpstr: helpstr += opthelpstr return helpstr @classmethod def _outputs_help(cls): """ Prints description for output parameters """ helpstr = ['Outputs::', ''] if cls.output_spec: outputs = cls.output_spec() # pylint: disable=E1102 for name, spec in sorted(outputs.traits(transient=None).items()): helpstr += cls._get_trait_desc(outputs, name, spec) if len(helpstr) == 2: helpstr += ['\tNone'] return helpstr def _outputs(self): """ Returns a bunch containing output fields for the class """ outputs = None if self.output_spec: outputs = self.output_spec() # pylint: disable=E1102 return outputs @classmethod def _get_filecopy_info(cls): """ Provides information about file inputs to copy or link to cwd. Necessary for pipeline operation """ info = [] if cls.input_spec is None: return info metadata = dict(copyfile=lambda t: t is not None) for name, spec in sorted(cls.input_spec().traits(**metadata).items()): info.append(dict(key=name, copy=spec.copyfile)) return info def _check_requires(self, spec, name, value): """ check if required inputs are satisfied """ if spec.requires: values = [not isdefined(getattr(self.inputs, field)) for field in spec.requires] if any(values) and isdefined(value): msg = ("%s requires a value for input '%s' because one of %s " "is set. 
For a list of required inputs, see %s.help()" % (self.__class__.__name__, name, ', '.join(spec.requires), self.__class__.__name__)) raise ValueError(msg) def _check_xor(self, spec, name, value): """ check if mutually exclusive inputs are satisfied """ if spec.xor: values = [isdefined(getattr(self.inputs, field)) for field in spec.xor] if not any(values) and not isdefined(value): msg = ("%s requires a value for one of the inputs '%s'. " "For a list of required inputs, see %s.help()" % (self.__class__.__name__, ', '.join(spec.xor), self.__class__.__name__)) raise ValueError(msg) def _check_mandatory_inputs(self): """ Raises an exception if a mandatory input is Undefined """ for name, spec in list(self.inputs.traits(mandatory=True).items()): value = getattr(self.inputs, name) self._check_xor(spec, name, value) if not isdefined(value) and spec.xor is None: msg = ("%s requires a value for input '%s'. " "For a list of required inputs, see %s.help()" % (self.__class__.__name__, name, self.__class__.__name__)) raise ValueError(msg) if isdefined(value): self._check_requires(spec, name, value) for name, spec in list(self.inputs.traits(mandatory=None, transient=None).items()): self._check_requires(spec, name, getattr(self.inputs, name)) def _check_version_requirements(self, trait_object, raise_exception=True): """ Raises an exception on version mismatch """ unavailable_traits = [] # check minimum version check = dict(min_ver=lambda t: t is not None) names = trait_object.trait_names(**check) if names and self.version: version = LooseVersion(str(self.version)) for name in names: min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) if min_ver > version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception('Trait %s (%s) (version %s < required %s)' % (name, self.__class__.__name__, version, min_ver)) check = dict(max_ver=lambda t: t is not None) names = trait_object.trait_names(**check) for name in names: max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) if max_ver < version: unavailable_traits.append(name) if not isdefined(getattr(trait_object, name)): continue if raise_exception: raise Exception('Trait %s (%s) (version %s > required %s)' % (name, self.__class__.__name__, version, max_ver)) return unavailable_traits def _run_interface(self, runtime): """ Core function that executes interface """ raise NotImplementedError def _duecredit_cite(self): """ Add the interface references to the duecredit citations """ for r in self.references_: r['path'] = self.__module__ due.cite(**r) def run(self, **inputs): """Execute this interface. This interface will not raise an exception if runtime.returncode is non-zero. 
Parameters ---------- inputs : allows the interface settings to be updated Returns ------- results : an InterfaceResult object containing a copy of the instance that was executed, provenance information and, if successful, results """ from ..utils.profiler import ResourceMonitor enable_rm = config.resource_monitor and self.resource_monitor force_raise = not getattr(self.inputs, 'ignore_exception', False) self.inputs.trait_set(**inputs) self._check_mandatory_inputs() self._check_version_requirements(self.inputs) interface = self.__class__ self._duecredit_cite() # initialize provenance tracking store_provenance = str2bool(config.get( 'execution', 'write_provenance', 'false')) env = deepcopy(dict(os.environ)) if self._redirect_x: env['DISPLAY'] = config.get_display() runtime = Bunch(cwd=os.getcwd(), returncode=None, duration=None, environ=env, startTime=dt.isoformat(dt.utcnow()), endTime=None, platform=platform.platform(), hostname=platform.node(), version=self.version) mon_sp = None if enable_rm: mon_freq = float(config.get('execution', 'resource_monitor_frequency', 1)) proc_pid = os.getpid() iflogger.debug('Creating a ResourceMonitor on a %s interface, PID=%d.', self.__class__.__name__, proc_pid) mon_sp = ResourceMonitor(proc_pid, freq=mon_freq) mon_sp.start() # Grab inputs now, as they should not change during execution inputs = self.inputs.get_traitsfree() outputs = None try: runtime = self._run_interface(runtime) outputs = self.aggregate_outputs(runtime) except Exception as e: import traceback # Retrieve the maximum info fast runtime.traceback = traceback.format_exc() # Gather up the exception arguments and append nipype info. exc_args = e.args if getattr(e, 'args') else tuple() exc_args += ('An exception of type %s occurred while running interface %s.' 
% (type(e).__name__, self.__class__.__name__), ) if config.get('logging', 'interface_level', 'info').lower() == 'debug': exc_args += ('Inputs: %s' % str(self.inputs),) runtime.traceback_args = ('\n'.join(['%s' % arg for arg in exc_args]),) if force_raise: raise finally: # This needs to be done always runtime.endTime = dt.isoformat(dt.utcnow()) timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) runtime.duration = (timediff.days * 86400 + timediff.seconds + timediff.microseconds / 1e6) results = InterfaceResult(interface, runtime, inputs=inputs, outputs=outputs, provenance=None) # Add provenance (if required) if store_provenance: # Provenance will only throw a warning if something went wrong results.provenance = write_provenance(results) # Make sure runtime profiler is shut down if enable_rm: import numpy as np mon_sp.stop() runtime.mem_peak_gb = None runtime.cpu_percent = None # Read .prof file in and set runtime values vals = np.loadtxt(mon_sp.fname, delimiter=',') if vals.size: vals = np.atleast_2d(vals) runtime.mem_peak_gb = vals[:, 1].max() / 1024 runtime.cpu_percent = vals[:, 2].max() runtime.prof_dict = { 'time': vals[:, 0].tolist(), 'mem_gb': (vals[:, 1] / 1024).tolist(), 'cpus': vals[:, 2].tolist(), } return results def _list_outputs(self): """ List the expected outputs """ if self.output_spec: raise NotImplementedError else: return None def aggregate_outputs(self, runtime=None, needed_outputs=None): """ Collate expected outputs and check for existence """ predicted_outputs = self._list_outputs() outputs = self._outputs() if predicted_outputs: _unavailable_outputs = [] if outputs: _unavailable_outputs = \ self._check_version_requirements(self._outputs()) for key, val in list(predicted_outputs.items()): if needed_outputs and key not in needed_outputs: continue if key in _unavailable_outputs: raise KeyError(('Output trait %s not available in version ' '%s of interface %s. Please inform ' 'developers.') % (key, self.version, self.__class__.__name__)) try: setattr(outputs, key, val) except TraitError as error: if getattr(error, 'info', 'default').startswith('an existing'): msg = ("File/Directory '%s' not found for %s output " "'%s'." % (val, self.__class__.__name__, key)) raise FileNotFoundError(msg) raise error return outputs @property def version(self): if self._version is None: if str2bool(config.get('execution', 'stop_on_unknown_version')): raise ValueError('Interface %s has no version information' % self.__class__.__name__) return self._version def load_inputs_from_json(self, json_file, overwrite=True): """ A convenient way to load pre-set inputs from a JSON file. """ with open(json_file) as fhandle: inputs_dict = json.load(fhandle) def_inputs = [] if not overwrite: def_inputs = list(self.inputs.get_traitsfree().keys()) new_inputs = list(set(list(inputs_dict.keys())) - set(def_inputs)) for key in new_inputs: if hasattr(self.inputs, key): setattr(self.inputs, key, inputs_dict[key]) def save_inputs_to_json(self, json_file): """ A convenient way to save current inputs to a JSON file. """ inputs = self.inputs.get_traitsfree() iflogger.debug('saving inputs {}', inputs) with open(json_file, 'w' if PY3 else 'wb') as fhandle: json.dump(inputs, fhandle, indent=4, ensure_ascii=False) class SimpleInterface(BaseInterface): """ An interface pattern that allows outputs to be set in a dictionary called ``_results`` that is automatically interpreted by ``_list_outputs()`` to find the outputs. 
When implementing ``_run_interface``, set outputs with:: self._results[out_name] = out_value This can be a way to upgrade a ``Function`` interface to do type checking. Examples -------- >>> def double(x): ... return 2 * x ... >>> class DoubleInputSpec(BaseInterfaceInputSpec): ... x = traits.Float(mandatory=True) ... >>> class DoubleOutputSpec(TraitedSpec): ... doubled = traits.Float() ... >>> class Double(SimpleInterface): ... input_spec = DoubleInputSpec ... output_spec = DoubleOutputSpec ... ... def _run_interface(self, runtime): ... self._results['doubled'] = double(self.inputs.x) ... return runtime >>> dbl = Double() >>> dbl.inputs.x = 2 >>> dbl.run().outputs.doubled 4.0 """ def __init__(self, from_file=None, resource_monitor=None, **inputs): super(SimpleInterface, self).__init__( from_file=from_file, resource_monitor=resource_monitor, **inputs) self._results = {} def _list_outputs(self): return self._results class Stream(object): """Function to capture stdout and stderr streams with timestamps stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359 """ def __init__(self, name, impl): self._name = name self._impl = impl self._buf = '' self._rows = [] self._lastidx = 0 self.default_encoding = locale.getdefaultlocale()[1] or 'UTF-8' def fileno(self): "Pass-through for file descriptor." return self._impl.fileno() def read(self, drain=0): "Read from the file descriptor. If 'drain' set, read until EOF." while self._read(drain) is not None: if not drain: break def _read(self, drain): "Read from the file descriptor" fd = self.fileno() buf = os.read(fd, 4096).decode(self.default_encoding) if not buf and not self._buf: return None if '\n' not in buf: if not drain: self._buf += buf return [] # prepend any data previously read, then split into lines and format buf = self._buf + buf if '\n' in buf: tmp, rest = buf.rsplit('\n', 1) else: tmp = buf rest = None self._buf = rest now = datetime.datetime.now().isoformat() rows = tmp.split('\n') self._rows += [(now, '%s %s:%s' % (self._name, now, r), r) for r in rows] for idx in range(self._lastidx, len(self._rows)): iflogger.info(self._rows[idx][1]) self._lastidx = len(self._rows) def _canonicalize_env(env): """Windows requires that environment be dicts with bytes as keys and values This function converts any unicode entries for Windows only, returning the dictionary untouched in other environments. Parameters ---------- env : dict environment dictionary with unicode or bytes keys and values Returns ------- env : dict Windows: environment dictionary with bytes keys and values Other: untouched input ``env`` """ if os.name != 'nt': return env out_env = {} for key, val in env.items(): if not isinstance(key, bytes): key = key.encode('utf-8') if not isinstance(val, bytes): val = val.encode('utf-8') out_env[key] = val return out_env def run_command(runtime, output=None, timeout=0.01): """Run a command, read stdout and stderr, prefix with timestamp.
The returned runtime contains a merged stdout+stderr log with timestamps """ # Init variables cmdline = runtime.cmdline env = _canonicalize_env(runtime.environ) errfile = None outfile = None stdout = sp.PIPE stderr = sp.PIPE if output == 'file': outfile = os.path.join(runtime.cwd, 'output.nipype') stdout = open(outfile, 'wb') # t=='text'===default stderr = sp.STDOUT elif output == 'file_split': outfile = os.path.join(runtime.cwd, 'stdout.nipype') stdout = open(outfile, 'wb') errfile = os.path.join(runtime.cwd, 'stderr.nipype') stderr = open(errfile, 'wb') elif output == 'file_stdout': outfile = os.path.join(runtime.cwd, 'stdout.nipype') stdout = open(outfile, 'wb') elif output == 'file_stderr': errfile = os.path.join(runtime.cwd, 'stderr.nipype') stderr = open(errfile, 'wb') proc = sp.Popen(cmdline, stdout=stdout, stderr=stderr, shell=True, cwd=runtime.cwd, env=env, close_fds=True, ) result = { 'stdout': [], 'stderr': [], 'merged': [], } if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] def _process(drain=0): try: res = select.select(streams, [], [], timeout) except select.error as e: iflogger.info(e) if e[0] == errno.EINTR: return else: raise else: for stream in res[0]: stream.read(drain) while proc.returncode is None: proc.poll() _process() _process(drain=1) # collect results, merge and return result = {} temp = [] for stream in streams: rows = stream._rows temp += rows result[stream._name] = [r[2] for r in rows] temp.sort() result['merged'] = [r[1] for r in temp] if output == 'allatonce': stdout, stderr = proc.communicate() result['stdout'] = read_stream(stdout, logger=iflogger) result['stderr'] = read_stream(stderr, logger=iflogger) elif output.startswith('file'): proc.wait() if outfile is not None: stdout.flush() stdout.close() with open(outfile, 'rb') as ofh: stdoutstr = ofh.read() result['stdout'] = read_stream(stdoutstr, logger=iflogger) if errfile is not None: stderr.flush() stderr.close() with open(errfile, 'rb') as efh: stderrstr = efh.read() result['stderr'] = read_stream(stderrstr, logger=iflogger) if output == 'file': result['merged'] = result['stdout'] result['stdout'] = [] else: proc.communicate() # Discard stdout and stderr runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = '\n'.join(result['merged']) runtime.returncode = proc.returncode return runtime def get_dependencies(name, environ): """Return library dependencies of a dynamically linked executable Uses otool on darwin, ldd on linux. Currently doesn't support windows. 
""" if sys.platform == 'darwin': proc = sp.Popen('otool -L `which %s`' % name, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ) elif 'linux' in sys.platform: proc = sp.Popen('ldd `which %s`' % name, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ) else: return 'Platform %s not supported' % sys.platform o, e = proc.communicate() return o.rstrip() class CommandLineInputSpec(BaseInterfaceInputSpec): args = Str(argstr='%s', desc='Additional parameters to the command') environ = DictStrStr(desc='Environment variables', usedefault=True, nohash=True) # This input does not have a "usedefault=True" so the set_default_terminal_output() # method would work terminal_output = traits.Enum('stream', 'allatonce', 'file', 'none', deprecated='1.0.0', desc=('Control terminal output: `stream` - ' 'displays to terminal immediately (default), ' '`allatonce` - waits till command is ' 'finished to display output, `file` - ' 'writes output to file, `none` - output' ' is ignored'), nohash=True) class CommandLine(BaseInterface): """Implements functionality to interact with command line programs class must be instantiated with a command argument Parameters ---------- command : string define base immutable `command` you wish to run args : string, optional optional arguments passed to base `command` Examples -------- >>> import pprint >>> from nipype.interfaces.base import CommandLine >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'}) >>> cli.inputs.args = '-al' >>> cli.cmdline # doctest: +ALLOW_UNICODE 'ls -al' # Use get_traitsfree() to check all inputs set >>> pprint.pprint(cli.inputs.get_traitsfree()) # doctest: +NORMALIZE_WHITESPACE +ALLOW_UNICODE {'args': '-al', 'environ': {'DISPLAY': ':1'}, 'ignore_exception': False} >>> cli.inputs.get_hashval()[0][0] # doctest: +ALLOW_UNICODE ('args', '-al') >>> cli.inputs.get_hashval()[1] # doctest: +ALLOW_UNICODE '11c37f97649cd61627f4afe5136af8c0' """ input_spec = CommandLineInputSpec _cmd = None _version = None _terminal_output = 'stream' @classmethod def set_default_terminal_output(cls, output_type): """Set the default terminal output for CommandLine Interfaces. This method is used to set default terminal output for CommandLine Interfaces. However, setting this will not update the output type for any existing instances. For these, assign the <instance>.terminal_output. """ if output_type in VALID_TERMINAL_OUTPUT: cls._terminal_output = output_type else: raise AttributeError('Invalid terminal output_type: %s' % output_type) @classmethod def help(cls, returnhelp=False): allhelp = 'Wraps command **{cmd}**\n\n{help}'.format( cmd=cls._cmd, help=super(CommandLine, cls).help(returnhelp=True)) if returnhelp: return allhelp print(allhelp) def __init__(self, command=None, terminal_output=None, **inputs): super(CommandLine, self).__init__(**inputs) self._environ = None # Set command. 
Input argument takes precedence self._cmd = command or getattr(self, '_cmd', None) if self._cmd is None: raise Exception("Missing command") if terminal_output is not None: self.terminal_output = terminal_output # Attach terminal_output callback for backwards compatibility self.inputs.on_trait_change(self._terminal_output_update, 'terminal_output') @property def cmd(self): """sets base command, immutable""" return self._cmd @property def cmdline(self): """ `command` plus any arguments (args) validates arguments and generates command line""" self._check_mandatory_inputs() allargs = [self.cmd] + self._parse_inputs() return ' '.join(allargs) @property def terminal_output(self): return self._terminal_output @terminal_output.setter def terminal_output(self, value): if value not in VALID_TERMINAL_OUTPUT: raise RuntimeError( 'Setting invalid value "%s" for terminal_output. Valid values are ' '%s.' % (value, ', '.join(['"%s"' % v for v in VALID_TERMINAL_OUTPUT]))) self._terminal_output = value def _terminal_output_update(self): self.terminal_output = self.terminal_output def raise_exception(self, runtime): raise RuntimeError( ('Command:\n{cmdline}\nStandard output:\n{stdout}\n' 'Standard error:\n{stderr}\nReturn code: {returncode}').format( **runtime.dictcopy())) def _get_environ(self): return getattr(self.inputs, 'environ', {}) def version_from_command(self, flag='-v'): cmdname = self.cmd.split()[0] env = dict(os.environ) if _exists_in_path(cmdname, env): out_environ = self._get_environ() env.update(out_environ) proc = sp.Popen(' '.join((cmdname, flag)), shell=True, env=env, stdout=sp.PIPE, stderr=sp.PIPE, ) o, e = proc.communicate() return o def _run_interface(self, runtime, correct_return_codes=(0,)): """Execute command via subprocess Parameters ---------- runtime : passed by the run function Returns ------- runtime : updated runtime information adds stdout, stderr, merged, cmdline, dependencies, command_path """ out_environ = self._get_environ() # Initialize runtime Bunch runtime.stdout = None runtime.stderr = None runtime.cmdline = self.cmdline runtime.environ.update(out_environ) # which $cmd executable_name = self.cmd.split()[0] exist_val, cmd_path = _exists_in_path(executable_name, runtime.environ) if not exist_val: raise IOError("command '%s' could not be found on host %s" % (self.cmd.split()[0], runtime.hostname)) runtime.command_path = cmd_path runtime.dependencies = get_dependencies(executable_name, runtime.environ) runtime = run_command(runtime, output=self.terminal_output) if runtime.returncode is None or \ runtime.returncode not in correct_return_codes: self.raise_exception(runtime) return runtime def _format_arg(self, name, trait_spec, value): """A helper function for _parse_inputs Formats a trait containing argstr metadata """ argstr = trait_spec.argstr iflogger.debug('%s_%s', name, value) if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: # Boolean options have no format string. Just append options if True. return argstr if value else None # traits.Either turns into traits.TraitCompound and does not have any # inner_traits elif trait_spec.is_trait_type(traits.List) \ or (trait_spec.is_trait_type(traits.TraitCompound) and isinstance(value, list)): # This is a bit simple-minded at present, and should be # construed as the default. If more sophisticated behavior # is needed, it can be accomplished with metadata (e.g. # format string for list member str'ification, specifying # the separator, etc.) 
# Depending on whether we stick with traitlets, and whether or # not we beef up traitlets.List, we may want to put some # type-checking code here as well sep = trait_spec.sep if trait_spec.sep is not None else ' ' if argstr.endswith('...'): # repeatable option # --id %d... will expand to # --id 1 --id 2 --id 3 etc.,. argstr = argstr.replace('...', '') return sep.join([argstr % elt for elt in value]) else: return argstr % sep.join(str(elt) for elt in value) else: # Append options using format string. return argstr % value def _filename_from_source(self, name, chain=None): if chain is None: chain = [] trait_spec = self.inputs.trait(name) retval = getattr(self.inputs, name) source_ext = None if not isdefined(retval) or "%s" in retval: if not trait_spec.name_source: return retval if isdefined(retval) and "%s" in retval: name_template = retval else: name_template = trait_spec.name_template if not name_template: name_template = "%s_generated" ns = trait_spec.name_source while isinstance(ns, (list, tuple)): if len(ns) > 1: iflogger.warning('Only one name_source per trait is allowed') ns = ns[0] if not isinstance(ns, (str, bytes)): raise ValueError( 'name_source of \'{}\' trait should be an input trait ' 'name, but a type {} object was found'.format(name, type(ns))) if isdefined(getattr(self.inputs, ns)): name_source = ns source = getattr(self.inputs, name_source) while isinstance(source, list): source = source[0] # special treatment for files try: _, base, source_ext = split_filename(source) except (AttributeError, TypeError): base = source else: if name in chain: raise NipypeInterfaceError('Mutually pointing name_sources') chain.append(name) base = self._filename_from_source(ns, chain) if isdefined(base): _, _, source_ext = split_filename(base) chain = None retval = name_template % base _, _, ext = split_filename(retval) if trait_spec.keep_extension and (ext or source_ext): if (ext is None or not ext) and source_ext: retval = retval + source_ext else: retval = self._overload_extension(retval, name) return retval def _gen_filename(self, name): raise NotImplementedError def _overload_extension(self, value, name=None): return value def _list_outputs(self): metadata = dict(name_source=lambda t: t is not None) traits = self.inputs.traits(**metadata) if traits: outputs = self.output_spec().get() # pylint: disable=E1102 for name, trait_spec in list(traits.items()): out_name = name if trait_spec.output_name is not None: out_name = trait_spec.output_name outputs[out_name] = \ os.path.abspath(self._filename_from_source(name)) return outputs def _parse_inputs(self, skip=None): """Parse all inputs using the ``argstr`` format string in the Trait. Any inputs that are assigned (not the default_value) are formatted to be added to the command line. Returns ------- all_args : list A list of all inputs formatted for the command line. 
""" all_args = [] initial_args = {} final_args = {} metadata = dict(argstr=lambda t: t is not None) for name, spec in sorted(self.inputs.traits(**metadata).items()): if skip and name in skip: continue value = getattr(self.inputs, name) if spec.name_source: value = self._filename_from_source(name) elif spec.genfile: if not isdefined(value) or value is None: value = self._gen_filename(name) if not isdefined(value): continue arg = self._format_arg(name, spec, value) if arg is None: continue pos = spec.position if pos is not None: if int(pos) >= 0: initial_args[pos] = arg else: final_args[pos] = arg else: all_args.append(arg) first_args = [el for _, el in sorted(initial_args.items())] last_args = [el for _, el in sorted(final_args.items())] return first_args + all_args + last_args class StdOutCommandLineInputSpec(CommandLineInputSpec): out_file = File(argstr="> %s", position=-1, genfile=True) class StdOutCommandLine(CommandLine): input_spec = StdOutCommandLineInputSpec def _gen_filename(self, name): return self._gen_outfilename() if name == 'out_file' else None def _gen_outfilename(self): raise NotImplementedError class MpiCommandLineInputSpec(CommandLineInputSpec): use_mpi = traits.Bool(False, desc="Whether or not to run the command with mpiexec", usedefault=True) n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " "specify if this is managed externally (e.g. through " "SGE)") class MpiCommandLine(CommandLine): """Implements functionality to interact with command line programs that can be run with MPI (i.e. using 'mpiexec'). Examples -------- >>> from nipype.interfaces.base import MpiCommandLine >>> mpi_cli = MpiCommandLine(command='my_mpi_prog') >>> mpi_cli.inputs.args = '-v' >>> mpi_cli.cmdline # doctest: +ALLOW_UNICODE 'my_mpi_prog -v' >>> mpi_cli.inputs.use_mpi = True >>> mpi_cli.inputs.n_procs = 8 >>> mpi_cli.cmdline # doctest: +ALLOW_UNICODE 'mpiexec -n 8 my_mpi_prog -v' """ input_spec = MpiCommandLineInputSpec @property def cmdline(self): """Adds 'mpiexec' to begining of command""" result = [] if self.inputs.use_mpi: result.append('mpiexec') if self.inputs.n_procs: result.append('-n %d' % self.inputs.n_procs) result.append(super(MpiCommandLine, self).cmdline) return ' '.join(result) class SEMLikeCommandLine(CommandLine): """In SEM derived interface all outputs have corresponding inputs. However, some SEM commands create outputs that are not defined in the XML. In those cases one has to create a subclass of the autogenerated one and overload the _list_outputs method. _outputs_from_inputs should still be used but only for the reduced (by excluding those that do not have corresponding inputs list of outputs. 
""" def _list_outputs(self): outputs = self.output_spec().get() # pylint: disable=E1102 return self._outputs_from_inputs(outputs) def _outputs_from_inputs(self, outputs): for name in list(outputs.keys()): corresponding_input = getattr(self.inputs, name) if isdefined(corresponding_input): if (isinstance(corresponding_input, bool) and corresponding_input): outputs[name] = \ os.path.abspath(self._outputs_filenames[name]) else: if isinstance(corresponding_input, list): outputs[name] = [os.path.abspath(inp) for inp in corresponding_input] else: outputs[name] = os.path.abspath(corresponding_input) return outputs def _format_arg(self, name, spec, value): if name in list(self._outputs_filenames.keys()): if isinstance(value, bool): if value: value = os.path.abspath(self._outputs_filenames[name]) else: return "" return super(SEMLikeCommandLine, self)._format_arg(name, spec, value) class MultiPath(traits.List): """ Abstract class - shared functionality of input and output MultiPath """ def validate(self, object, name, value): # want to treat range and other sequences (except str) as list if not isinstance(value, (str, bytes)) and isinstance(value, collections.Sequence): value = list(value) if not isdefined(value) or \ (isinstance(value, list) and len(value) == 0): return Undefined newvalue = value if not isinstance(value, list) \ or (self.inner_traits() and isinstance(self.inner_traits()[0].trait_type, traits.List) and not isinstance(self.inner_traits()[0].trait_type, InputMultiPath) and isinstance(value, list) and value and not isinstance(value[0], list)): newvalue = [value] value = super(MultiPath, self).validate(object, name, newvalue) if value: return value self.error(object, name, value) class OutputMultiPath(MultiPath): """ Implements a user friendly traits that accepts one or more paths to files or directories. This is the output version which return a single string whenever possible (when it was set to a single value or a list of length 1). Default value of this trait is _Undefined. It does not accept empty lists. XXX This should only be used as a final resort. We should stick to established Traits to the extent possible. XXX This needs to be vetted by somebody who understands traits >>> from nipype.interfaces.base import OutputMultiPath >>> class A(TraitedSpec): ... foo = OutputMultiPath(File(exists=False)) >>> a = A() >>> a.foo <undefined> >>> a.foo = '/software/temp/foo.txt' >>> a.foo # doctest: +ALLOW_UNICODE '/software/temp/foo.txt' >>> a.foo = ['/software/temp/foo.txt'] >>> a.foo # doctest: +ALLOW_UNICODE '/software/temp/foo.txt' >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] >>> a.foo # doctest: +ALLOW_UNICODE ['/software/temp/foo.txt', '/software/temp/goo.txt'] """ def get(self, object, name): value = self.get_value(object, name) if len(value) == 0: return Undefined elif len(value) == 1: return value[0] else: return value def set(self, object, name, value): self.set_value(object, name, value) class InputMultiPath(MultiPath): """ Implements a user friendly traits that accepts one or more paths to files or directories. This is the input version which always returns a list. Default value of this trait is _Undefined. It does not accept empty lists. XXX This should only be used as a final resort. We should stick to established Traits to the extent possible. XXX This needs to be vetted by somebody who understands traits >>> from nipype.interfaces.base import InputMultiPath >>> class A(TraitedSpec): ... 
foo = InputMultiPath(File(exists=False)) >>> a = A() >>> a.foo <undefined> >>> a.foo = '/software/temp/foo.txt' >>> a.foo # doctest: +ALLOW_UNICODE ['/software/temp/foo.txt'] >>> a.foo = ['/software/temp/foo.txt'] >>> a.foo # doctest: +ALLOW_UNICODE ['/software/temp/foo.txt'] >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] >>> a.foo # doctest: +ALLOW_UNICODE ['/software/temp/foo.txt', '/software/temp/goo.txt'] """ pass
bsd-3-clause
2,941,847,679,055,743,500
34.888889
99
0.5484
false
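The record that ends here is nipype's ``interfaces.base`` module, whose ``CommandLine`` machinery (``argstr`` formatting, ``position`` ordering, and the ``name_source``/``name_template`` filename generation implemented in ``_parse_inputs`` and ``_filename_from_source`` above) is easiest to see from the caller's side. The sketch below is illustrative only and not part of the dataset: the ``MyCopy`` interface, the wrapped ``cp`` command and the ``example.txt`` file are assumptions made for this example; only the ``nipype.interfaces.base`` names that already appear in the record's own doctests are relied upon.

from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
                                    File, TraitedSpec)


class MyCopyInputSpec(CommandLineInputSpec):
    in_file = File(exists=True, mandatory=True, argstr='%s', position=0,
                   desc='file to copy')
    # out_file is produced by _filename_from_source(): '%s_copy' is filled
    # with the basename of in_file and keep_extension re-attaches '.txt'.
    out_file = File(argstr='%s', position=1, name_source='in_file',
                    name_template='%s_copy', keep_extension=True,
                    desc='name of the copied file')


class MyCopyOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc='the copied file')


class MyCopy(CommandLine):
    """Hypothetical wrapper around the POSIX ``cp`` command."""
    _cmd = 'cp'
    input_spec = MyCopyInputSpec
    output_spec = MyCopyOutputSpec


if __name__ == '__main__':
    open('example.txt', 'w').close()   # the exists=True input must exist
    copy = MyCopy(in_file='example.txt')
    print(copy.cmdline)                # cp example.txt example_copy.txt
    result = copy.run()                # executes cp via run_command()
    print(result.outputs.out_file)     # absolute path to example_copy.txt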
dipanshunagar/PySyft
syft/tensor.py
1
52597
# -*- coding: utf-8 -*- import numpy as np import syft import scipy import pickle __all__ = [ 'equal', 'TensorBase', ] def _ensure_ndarray(arr): if not isinstance(arr, np.ndarray): arr = np.array(arr) return arr def _ensure_tensorbase(tensor): if not isinstance(tensor, TensorBase): tensor = TensorBase(tensor) return tensor def equal(tensor1, tensor2): """Checks if two tensors are equal. Two tensors are considered equal if they are the same size and contain the same elements. Assumption: tensor1 and tensor2 are of type TensorBase. Non-TensorBase objects will be converted to TensorBase objects. """ tensor1 = _ensure_tensorbase(tensor1) tensor2 = _ensure_tensorbase(tensor2) if tensor1.encrypted or tensor2.encrypted: return NotImplemented left = tensor1.data.shape == tensor2.data.shape right = np.allclose(tensor1.data, tensor2.data) return left and right class TensorBase(object): """ A base tensor class that performs basic element-wise operation such as addition, subtraction, multiplication and division, and also dot and matrix products. """ _mul_depth = 0 _add_depth = 0 def __init__(self, arr_like, encrypted=False): self.data = _ensure_ndarray(arr_like) self.encrypted = encrypted def _calc_mul_depth(self, tensor1, tensor2): if isinstance(tensor1, TensorBase) and isinstance(tensor2, TensorBase): self._mul_depth = max(tensor1._mul_depth, tensor2._mul_depth) + 1 elif isinstance(tensor1, TensorBase): self._mul_depth = tensor1._mul_depth + 1 elif isinstance(tensor2, TensorBase): self._mul_depth = tensor2._mul_depth + 1 def _calc_add_depth(self, tensor1, tensor2): if isinstance(tensor1, TensorBase) and isinstance(tensor2, TensorBase): self._add_depth = max(tensor1._add_depth, tensor2._add_depth) + 1 elif isinstance(tensor1, TensorBase): self._add_depth = tensor1._add_depth + 1 elif isinstance(tensor2, TensorBase): self._add_depth = tensor2._add_depth + 1 def encrypt(self, pubkey): """Encrypts the Tensor using a Public Key""" if self.encrypted: return NotImplemented else: if(type(pubkey) == syft.he.paillier.keys.PublicKey): out = syft.he.paillier.PaillierTensor(pubkey, self.data) return out else: return NotImplemented def decrypt(self, seckey): """Decrypts the tensor using a Secret Key""" if self.encrypted: return seckey.decrypt(self) else: return self def __len__(self): return len(self.data) def __add__(self, tensor): """Performs element-wise addition between two tensors""" if self.encrypted: return NotImplemented tensor = _ensure_tensorbase(tensor) return TensorBase(self.data + tensor.data) def __iadd__(self, tensor): """Performs in place element-wise addition between two tensors""" if self.encrypted: return NotImplemented tensor = _ensure_tensorbase(tensor) self.data += tensor.data return self def __sub__(self, tensor): """Performs element-wise subtraction between two tensors""" if self.encrypted: return NotImplemented tensor = _ensure_tensorbase(tensor) return TensorBase(self.data - tensor.data) def __isub__(self, tensor): """Performs in place element-wise subtraction between two tensors""" if self.encrypted: return NotImplemented tensor = _ensure_tensorbase(tensor) self.data -= tensor.data return self def __eq__(self, tensor): """Checks if two tensors are equal""" if self.encrypted: return NotImplemented return syft.equal(self, tensor) def dot(self, tensor): """Returns inner product of two tensors""" if self.encrypted: return NotImplemented if tensor.encrypted: return tensor.dot(self) return syft.dot(self, tensor) def __matmul__(self, tensor): """Performs matrix multiplication between two 
tensors""" if self.encrypted: return NotImplemented return syft.matmul(self, tensor) def __mul__(self, tensor): """Performs element-wise multiplication between two tensors""" if self.encrypted: return NotImplemented # if it's a sub-class of TensorBase, use the multiplication of that # subclass not this one. if(type(tensor) != TensorBase and isinstance(tensor, TensorBase)): return tensor * self else: tensor = _ensure_tensorbase(tensor) return TensorBase(tensor.data * self.data) def __imul__(self, tensor): """Performs in place element-wise multiplication between two tensors""" if self.encrypted: return NotImplemented if(type(tensor) != TensorBase and isinstance(tensor, TensorBase)): self.data = tensor.data * self.data self.encrypted = tensor.encrypted else: tensor = _ensure_tensorbase(tensor) self.data *= tensor.data return self def __truediv__(self, tensor): """Performs element-wise division between two tensors""" if self.encrypted: return NotImplemented if(type(tensor) != TensorBase and isinstance(tensor, TensorBase)): return NotImplemented # it's not clear that this can be done else: tensor = _ensure_tensorbase(tensor) return TensorBase(self.data / tensor.data) def __itruediv__(self, tensor): """Performs in place element-wise division between two tensors""" if self.encrypted: return NotImplemented tensor = _ensure_tensorbase(tensor) self.data = self.data / tensor.data return self def __setitem__(self, key, value): if(self.encrypted): return NotImplemented else: self.data[key] = value return self def __getitem__(self, position): """Get value at a specific index.""" if self.encrypted: return NotImplemented else: out = self.data[position] if(len(self.shape()) == 1): return out else: return TensorBase(self.data[position], self.encrypted) def abs(self): """Returns absolute value of tensor as a new tensor""" if self.encrypted: return NotImplemented return np.absolute(self.data) def abs_(self): """Replaces tensor values with its absolute value""" if self.encrypted: return NotImplemented self.data = np.absolute(self.data) return self.data def shape(self): """Returns a tuple of input array dimensions.""" if self.encrypted: return NotImplemented return self.data.shape def sqrt(self): """Returns the square root of each element of the tensor.""" if self.encrypted: return NotImplemented return np.sqrt(self.data) def sqrt_(self): """Computes the square root of each element of the tensor inplace.""" if self.encrypted: return NotImplemented self.data = np.sqrt(self.data) def dim(self): """Returns an integer of the number of dimensions of this tensor.""" return self.data.ndim def sum(self, dim=None): """Returns the sum of all elements in the input array.""" if self.encrypted: return NotImplemented if dim is None: return self.data.sum() else: return self.data.sum(axis=dim) def ceil(self): """Returns the ceiling of the input tensor elementwise.""" if self.encrypted: return NotImplemented return syft.math.ceil(self.data) def ceil_(self): """Computes the ceiling of the input tensor elementwise, in place.""" if self.encrypted: return NotImplemented self.data = syft.math.ceil(self.data).data return self def floor_(self): """Inplace floor method""" if self.encrypted: return NotImplemented self.data = syft.math.floor(self.data).data return self def zero_(self): """Replaces tensor values with zeros""" if self.encrypted: return NotImplemented self.data.fill(0) return self.data def addmm(self, tensor2, mat, beta=1, alpha=1): """Performs ((Mat*Beta)+((Tensor1@Tensor2)*Alpha)) and returns the result as a Tensor Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends
on the arguments in the following way. *If both tensors are 1-dimensional, their dot product is returned. *If both arguments are 2-D they are multiplied like conventional matrices. *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. """ return syft.addmm(self, tensor2, mat, beta, alpha) def addmm_(self, tensor2, mat, beta=1, alpha=1): """Performs ((Mat*Beta)+((Tensor1@Tensor2)*Alpha)) and updates Tensor1 with the result and returns it Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way. *If both tensors are 1-dimensional, their dot product is returned. *If both arguments are 2-D they are multiplied like conventional matrices. *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. """ _ensure_tensorbase(tensor2) _ensure_tensorbase(mat) if self.encrypted or tensor2.encrypted or mat.encrypted: return NotImplemented else: self.data = np.array((np.matmul(self.data, tensor2.data))) self.data *= alpha mat.data *= beta self.data = self.data + mat.data return self def addcmul(self, tensor2, mat, value=1): """Performs the element-wise multiplication of tensor1 by tensor2, multiply the result by the scalar value and add it to mat.""" return syft.addcmul(self, tensor2, mat, value) def addcmul_(self, tensor2, mat, value=1): """Performs in-place element-wise multiplication of tensor1 by tensor2, multiply the result by the scalar value and add it to mat.""" _ensure_tensorbase(tensor2) _ensure_tensorbase(mat) if self.encrypted or tensor2.encrypted or mat.encrypted: return NotImplemented else: self.data *= tensor2.data self.data *= value self.data += mat.data return self def addcdiv(self, tensor2, mat, value=1): """Performs the element-wise division of tensor1 by tensor2, multiply the result by the scalar value and add it to mat.""" return syft.addcdiv(self, tensor2, mat, value) def addcdiv_(self, tensor2, mat, value=1): """Performs in-place element-wise division of tensor1 by tensor2, multiply the result by the scalar value and add it to mat.""" _ensure_tensorbase(tensor2) _ensure_tensorbase(mat) if self.encrypted or tensor2.encrypted or mat.encrypted: return NotImplemented else: self.data = self.data / tensor2.data self.data *= value self.data += mat.data return self def addmv(self, mat, vec, beta=1, alpha=1): """Performs a matrix-vector product of the matrix mat and the vector vec. The vector tensor is added to the final result. tensor1 and vec are 1d tensors out=(beta∗tensor)+(alpha∗(mat@vec2))""" return syft.addmv(self, mat, vec, beta, alpha) def addmv_(self, mat, vec, beta=1, alpha=1): """Performs an in-place matrix-vector product of the matrix mat and the vector vec. The vector tensor is added to the final result.
tensor1 and vec are 1d tensors out=(beta∗tensor)+(alpha∗(mat@vec2))""" _ensure_tensorbase(vec) _ensure_tensorbase(mat) if vec.data.ndim != 1: print("dimension of vec is not 1") elif self.data.ndim != 1: print("dimension of tensor is not 1") elif self.encrypted or vec.encrypted or mat.encrypted: return NotImplemented else: self *= beta temp = np.matmul(mat.data, vec.data) * alpha self += temp return self def addbmm(self, tensor2, mat, beta=1, alpha=1): """Performs a batch matrix-matrix product of matrices stored in batch1(tensor1) and batch2(tensor2), with a reduced add step (all matrix multiplications get accumulated along the first dimension). mat is added to the final result. res=(beta∗M)+(alpha∗sum(batch1i@batch2i, i=0, b)) * batch1 and batch2 must be 3D Tensors each containing the same number of matrices.""" return syft.addbmm(self, tensor2, mat, beta, alpha) def addbmm_(self, tensor2, mat, beta=1, alpha=1): """Performs a inplace batch matrix-matrix product of matrices stored in batch1(tensor1) and batch2(tensor2), with a reduced add step (all matrix multiplications get accumulated along the first dimension). mat is added to the final result. res=(beta∗M)+(alpha∗sum(batch1i@batch2i, i=0, b) * batch1 and batch2 must be 3D Tensors each containing the same number of matrices.)""" _ensure_tensorbase(tensor2) _ensure_tensorbase(mat) if tensor2.data.ndim != 3: print("dimension of tensor2 is not 3") elif self.data.ndim != 3: print("dimension of tensor1 is not 3") elif self.encrypted or tensor2.encrypted or mat.encrypted: return NotImplemented else: self.data = np.matmul(self.data, tensor2.data) sum_ = 0 # sum is a python built in function a keyword ! for i in range(len(self.data)): sum_ += self.data[i] self.data = (mat.data * beta) + (alpha * sum_) return self def baddbmm(self, tensor2, mat, beta=1, alpha=1): """Performs a batch matrix-matrix product of matrices in batch1(tensor1) and batch2(tensor2). mat is added to the final result. resi=(beta∗Mi)+(alpha∗batch1i×batch2i) *batch1 and batch2 must be 3D Tensors each containing the same number of matrices.""" return syft.baddbmm(self, tensor2, mat, beta, alpha) def baddbmm_(self, tensor2, mat, beta=1, alpha=1): """Performs a batch matrix-matrix product of matrices in batch1(tensor1) and batch2(tensor2). mat is added to the final result. resi=(beta∗Mi)+(alpha∗batch1i×batch2i) *batch1 and batch2 must be 3D Tensors each containing the same number of matrices.""" _ensure_tensorbase(tensor2) _ensure_tensorbase(mat) if tensor2.data.ndim != 3: print("dimension of tensor2 is not 3") elif self.data.ndim != 3: print("dimension of tensor1 is not 3") elif self.encrypted or tensor2.encrypted or mat.encrypted: return NotImplemented else: self.data = np.matmul(self.data, tensor2.data) self.data *= alpha self.data += (mat.data * beta) return self def max(self, axis=None): """ If axis is not specified, finds the largest element in the tensor. Otherwise, reduces along the specified axis. """ if self.encrypted: return NotImplemented if axis is None: return _ensure_tensorbase(np.max(self.data)) return _ensure_tensorbase(np.max(self.data, axis)) def permute(self, dims): """ Permute the dimensions of this tensor. Parameters: *dims (int...) – The desired ordering of dimensions """ if self.encrypted: return NotImplemented if dims is None: raise ValueError("dims cannot be none") return _ensure_tensorbase(np.transpose(self.data, dims)) def transpose(self, dim0, dim1): """ Returns the transpose along the dimensions in a new Tensor. 
""" return syft.transpose(self.data, dim0, dim1) def transpose_(self, dim0, dim1): """ Replaces the Tensor with its transpose along the dimensions. """ num_dims = len(self.data.shape) axes = list(range(num_dims)) if dim0 >= num_dims: print("dimension 0 out of range") elif dim1 >= num_dims: print("dimension 1 out of range") elif self.encrypted: raise NotImplemented else: axes[dim0] = dim1 axes[dim1] = dim0 self.data = np.transpose(self.data, axes=tuple(axes)) def t(self): """ Returns the transpose along dimensions 0, 1 in a new Tensor. """ return self.transpose(0, 1) def t_(self): """ Replaces the Tensor with its transpose along dimensions 0, 1. """ self.transpose_(0, 1) def unsqueeze(self, dim): """ Returns expanded Tensor. An additional dimension of size one is added to at index 'dim'. """ return syft.unsqueeze(self.data, dim) def unsqueeze_(self, dim): """ Replaces with an expanded Tensor. An additional dimension of size one is added to at index 'dim'. """ num_dims = len(self.data.shape) if dim >= num_dims or dim < 0: print("dimension out of range") elif self.encrypted: raise NotImplemented else: self.data = np.expand_dims(self.data, dim) def exp(self): """Computes the exponential of each element in tensor.""" if self.encrypted: return NotImplemented out = np.exp(self.data) return TensorBase(out) def exp_(self): """Computes the exponential of each element inplace.""" if self.encrypted: return NotImplemented self.data = np.exp(self.data) return self def frac(self): """"Computes the fractional portion of each element in tensor.""" if self.encrypted: return NotImplemented out = np.modf(self.data)[0] return TensorBase(out) def frac_(self): """"Computes the fractional portion of each element inplace.""" if self.encrypted: return NotImplemented self.data = np.modf(self.data)[0] return self def sigmoid_(self): """ Performs inline sigmoid function on the Tensor elementwise Implementation details: Because of the way syft.math.sigmoid operates on a Tensor Object calling it on self.data will cause an input error thus we call sigmoid on the tensor object and we take the member 'data' from the returned Tensor """ if self.encrypted: return NotImplemented self.data = syft.math.sigmoid(self).data # self.data = np.array((1 / (1 + np.exp(np.array(-self.data))))) return self def tanh_(self): """ Performs tanh (hyperbolic tangent) function on the Tensor elementwise """ if self.encrypted: return NotImplemented self.data = syft.math.tanh(self).data # self.data = np.array(np.tanh(np.array(self.data))) return self def __str__(self): return "BaseTensor: " + str(self.data) def __repr__(self): return "BaseTensor: " + repr(self.data) def rsqrt(self): """Returns reciprocal of square root of Tensor element wise""" if self.encrypted: return NotImplemented out = 1 / np.sqrt(self.data) return TensorBase(out) def rsqrt_(self): """Computes reciprocal of square root of Tensor elements inplace""" if self.encrypted: return NotImplemented self.data = 1 / np.sqrt(self.data) def sign(self): """Return a tensor that contains sign of each element """ if self.encrypted: return NotImplemented out = np.sign(self.data) return TensorBase(out) def sign_(self): """Computes the sign of each element of the Tensor inplace""" if self.encrypted: return NotImplemented self.data = np.sign(self.data) def to_numpy(self): """Returns the tensor as numpy.ndarray""" if self.encrypted: return NotImplemented return np.array(self.data) def reciprocal(self): """Computes element wise reciprocal""" if self.encrypted: return NotImplemented out = 1 / 
np.array(self.data) return TensorBase(out) def reciprocal_(self): """Computes element wise reciprocal""" if self.encrypted: return NotImplemented self.data = 1 / np.array(self.data) def log(self): """performs elementwise logarithm operation and returns a new Tensor""" if self.encrypted: return NotImplemented out = np.log(self.data) return TensorBase(out) def log_(self): """performs elementwise logarithm operation inplace""" if self.encrypted: return NotImplemented self.data = np.log(self.data) return self def log1p(self): """performs elementwise log(1+x) operation and returns new tensor""" if self.encrypted: return NotImplemented out = np.log1p(self.data) return TensorBase(out) def log1p_(self): """performs elementwise log(1+x) operation inplace""" if self.encrypted: return NotImplemented self.data = np.log1p(self.data) return self def log_normal_(self, mean=0, stdev=1.0): """Fills give tensor with samples from a lognormal distribution with given mean and stdev""" if self.encrypted: return NotImplemented self.data = np.random.lognormal(mean, stdev, self.shape()) return self def clamp(self, minimum=None, maximum=None): """Returns a clamped tensor into the range [min, max], elementwise""" if self.encrypted: return NotImplemented return TensorBase(np.clip(self.data, a_min=minimum, a_max=maximum)) def clamp_(self, minimum=None, maximum=None): """Clamp the tensor, in-place, elementwise into the range [min, max]""" if self.encrypted: return NotImplemented self.data = np.clip(self.data, a_min=minimum, a_max=maximum) return self def clone(self): """Returns a copy of the tensor. The copy has the same size and data type as the original tensor.""" if self.encrypted: return NotImplemented return TensorBase(np.copy(self.data)) def chunk(self, n, dim=0, same_size=False): """Returns a list of tensors by splitting the tensor into a number of chunks along a given dimension. Raises an exception if same_size is set to True and given tensor can't be split in n same-size chunks along dim.""" if self.encrypted: return NotImplemented if same_size: return [TensorBase(x) for x in np.split(self.data, n, dim)] else: return [TensorBase(x) for x in np.array_split(self.data, n, dim)] def gt(self, other): """Returns a new Tensor having boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented return TensorBase(np.greater(self.data, other.data)) def gt_(self, other): """Writes in-place, boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented self.data = np.greater(self.data, other.data) return self def lt(self, other): """Returns a new Tensor having boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise. 
The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented return TensorBase(np.less(self.data, other.data)) def lt_(self, other): """Writes in-place, boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented self.data = np.less(self.data, other.data) return self def ge(self, other): """Returns a new Tensor having boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented return TensorBase(np.greater_equal(self.data, other.data)) def ge_(self, other): """Writes in-place, boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented self.data = np.greater_equal(self.data, other.data) return self def le(self, other): """Returns a new Tensor having boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented return TensorBase(np.less_equal(self.data, other.data)) def le_(self, other): """Writes in-place, boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" other = _ensure_tensorbase(other) if self.encrypted or other.encrypted: return NotImplemented self.data = np.less_equal(self.data, other.data) return self def bernoulli(self, p): """ Returns a Tensor filled with binary random numbers (0 or 1) from a bernoulli distribution with probability and shape specified by p(arr_like). The p Tensor should be a tensor containing probabilities to be used for drawing the binary random number. Hence, all values in p have to be in the range: 0<=p<=1 """ if self.encrypted: return NotImplemented p = _ensure_tensorbase(p) return TensorBase(np.random.binomial(1, p.data)) def bernoulli_(self, p): """ Fills the Tensor in-place with binary random numbers (0 or 1) from a bernoulli distribution with probability and shape specified by p(arr_like) The p Tensor should be a tensor containing probabilities to be used for drawing the binary random number. 
Hence, all values in p have to be in the range: 0<=p<=1 """ if self.encrypted: return NotImplemented p = _ensure_tensorbase(p) self.data = np.random.binomial(1, p.data) return self def uniform_(self, low=0, high=1): """Fills the tensor in-place with numbers sampled uniformly over the half-open interval [low,high) or from the uniform distribution""" if self.encrypted: return NotImplemented self.data = np.random.uniform(low=low, high=high, size=self.shape()) return self def uniform(self, low=0, high=1): """Returns a new tensor filled with numbers sampled uniformly over the half-open interval [low,high) or from the uniform distribution""" if self.encrypted: return NotImplemented out = np.random.uniform(low=low, high=high, size=self.shape()) return TensorBase(out) def fill_(self, value): """Fills the tensor in-place with the specified value""" if self.encrypted: return NotImplemented self.data.fill(value) return self def tolist(self): """Returns a new tensor as (possibly a nested) list""" if self.encrypted: return NotImplemented out = self.data.tolist() return out def topk(self, k, largest=True): """Returns a new tensor with the sorted k largest (or smallest) values""" if self.encrypted: return NotImplemented out_sort = np.sort(self.data) if self.data.ndim > 1: out = np.partition(out_sort, kth=k) out = out[:, -k:] if largest else out[:, :k] else: out = np.partition(out_sort, kth=k) out = out[-k:] if largest else out[:k] return TensorBase(out) def trace(self, axis1=None, axis2=None): """Returns a new tensor with the sum along diagonals of a 2D tensor. Axis1 and Axis2 are used to extract 2D subarray for sum calculation along diagonals, if tensor has more than two dimensions. """ if self.encrypted: return NotImplemented if axis1 is not None and axis2 is not None and self.data.ndim > 2: out = np.trace(a=self.data, axis1=axis1, axis2=axis2) else: out = np.trace(a=self.data) return TensorBase(out) def view(self, *args): """View the tensor.""" if self.encrypted: return NotImplemented else: dt = np.copy(self.data) return TensorBase(dt.reshape(*args)) def view_as(self, tensor): """ View as another tensor's shape """ if self.encrypted: return NotImplemented else: return self.view(tensor.shape()) def resize_(self, *size): """Resizes the tensor in-place, zero-padding or truncating the flattened data as needed""" input_size = np.prod(size) extension = input_size - self.data.size flattened = self.data.flatten() if input_size >= 0: if extension > 0: data = np.append(flattened, np.zeros(extension)) self.data = data.reshape(*size) print(self.data) elif extension < 0: size_ = self.data.size + extension self.data = flattened[:size_] self.data = self.data.reshape(*size) print(self.data) else: self.data = self.data.reshape(*size) print(self.data) else: raise ValueError('negative dimension not allowed') def resize_as_(self, tensor): """Resizes the tensor in-place to match the shape of the given tensor""" size = tensor.data.shape self.resize_(size) def round(self, decimals=0): """Returns a new tensor with elements rounded off to a nearest decimal place""" if self.encrypted: return NotImplemented out = np.round(self.data, decimals=decimals) return TensorBase(out) def round_(self, decimals=0): """Round the elements of tensor in-place to a nearest decimal place""" if self.encrypted: return NotImplemented self.data = np.round(self.data, decimals=decimals) return self def repeat(self, reps): """Return a new tensor by repeating the values given by reps""" if self.encrypted: return NotImplemented out = np.tile(self.data, reps=reps) return TensorBase(out) def pow(self, exponent): """Return a new tensor by raising elements to the given exponent.
If exponent is an array, each element of the tensor is raised positionally to the element of the exponent""" if self.encrypted: return NotImplemented out = np.power(self.data, exponent) return TensorBase(out) def pow_(self, exponent): """Raise elements to the given exponent in-place. If exponent is an array, each element of the tensor is raised positionally to the element of the exponent""" if self.encrypted: return NotImplemented self.data = np.power(self.data, exponent) return self def prod(self, axis=None): """Returns a new tensor with the product of all the elements along the specified axis""" if self.encrypted: return NotImplemented out = np.prod(self.data, axis=axis) return TensorBase(out) def random_(self, low, high=None, size=None): """Fill the tensor in-place with random integers from [low to high)""" if self.encrypted: return NotImplemented self.data = np.random.randint(low=low, high=high, size=size) return self def nonzero(self): """Returns a new tensor with the indices of non-zero elements""" if self.encrypted: return NotImplemented out = np.array(np.nonzero(self.data)) return TensorBase(out) def size(self): """Size of tensor""" if self.encrypted: return NotImplemented else: return self.data.size def cumprod(self, dim=0): """Returns the cumulative product of elements in the dimension dim.""" if self.encrypted: return NotImplemented return syft.math.cumprod(self, dim) def cumprod_(self, dim=0): """calculate in-place the cumulative product of elements in the dimension dim.""" if self.encrypted: return NotImplemented self.data = syft.math.cumprod(self, dim).data return self def split(self, split_size, dim=0): """Returns tuple of tensors of equally sized tensor/chunks (if possible)""" if self.encrypted: return NotImplemented splits = np.array_split(self.data, split_size, axis=0) tensors = list() for s in splits: tensors.append(TensorBase(s)) tensors_tuple = tuple(tensors) return tensors_tuple def squeeze(self, axis=None): """Returns a new tensor with all the single-dimensional entries removed""" if self.encrypted: return NotImplemented out = np.squeeze(self.data, axis=axis) return TensorBase(out) def expand_as(self, tensor): """Returns a new tensor with the expanded size as of the specified (input) tensor""" if self.encrypted: return NotImplemented shape = tensor.data.shape neg_shapes = np.where(shape == -1)[0] if len(neg_shapes) > 1: shape[neg_shapes] = self.data.shape[neg_shapes] out = np.broadcast_to(self.data, shape) return TensorBase(out) def mean(self, dim=None, keepdim=False): """Return the mean of the tensor elements""" if self.encrypted: return NotImplemented out = np.mean(self.data, axis=dim, keepdims=keepdim) return TensorBase(out) def neg(self): """Returns negative of the elements of tensor""" if self.encrypted: return NotImplemented out = -1 * np.array(self.data) return TensorBase(out) def neg_(self): """Returns negative of the elements of tensor inplace""" if self.encrypted: return NotImplemented self.data = -1 * np.array(self.data) return self def normal(self, mu, sigma): """Returns a Tensor of random numbers drawn from separate normal distributions whose mean and standard deviation are given.""" if self.encrypted: return NotImplemented out = np.random.normal(mu, sigma, self.data.shape) return TensorBase(out) def normal_(self, mu, sigma): """Returns a Tensor of random numbers in-place drawn from separate normal distributions whose mean and standard deviation are given.""" if self.encrypted: return NotImplemented self.data = np.random.normal(mu, sigma, self.data.shape) return
self def ne(self, tensor): """Checks element-wise equality with the given tensor and returns a boolean result with same dimension as the input matrix""" if self.encrypted: return NotImplemented else: if tensor.shape() == self.shape(): tensor2 = np.array([1 if x else 0 for x in np.equal( tensor.data.flatten(), self.data.flatten()).tolist()]) result = tensor2.reshape(self.data.shape) return TensorBase(result) else: raise ValueError('inconsistent dimensions {} and {}'.format( self.shape(), tensor.shape())) def ne_(self, tensor): """ Checks in place element wise equality and updates the data matrix to the equality matrix """ if self.encrypted: return NotImplemented else: value = self.ne(tensor) self.data = value.data def median(self, axis=1, keepdims=False): """Returns median of tensor as per specified axis. By default median is calculated along rows. axis=None can be used get median of whole tensor.""" if self.encrypted: return NotImplemented out = np.median(np.array(self.data), axis=axis, keepdims=keepdims) return TensorBase(out) def mode(self, axis=1): """Returns mode of tensor as per specified axis. By default mode is calculated along rows. To get mode of whole tensor, specify axis=None""" if self.encrypted: return NotImplemented out = scipy.stats.mode(np.array(self.data), axis=axis) return TensorBase(out) def inverse(self): """Returns inverse of a square matrix""" if self.encrypted: return NotImplemented inv = np.linalg.inv(np.matrix(np.array(self.data))) return TensorBase(inv) def min(self, axis=1, keepdims=False): """Returns minimum value in tensor along rows by default but if axis=None it will return minimum value in tensor""" if self.encrypted: return NotImplemented min = np.matrix(np.array(self.data)).min(axis=axis, keepdims=keepdims) return TensorBase(min) def histc(self, bins=10, min=0, max=0): """Computes the histogram of a tensor and Returns it""" if self.encrypted: return NotImplemented hist, edges = np.histogram( np.array(self.data), bins=bins, range=(min, max)) return TensorBase(hist) def scatter_(self, dim, index, src): """ Writes all values from the Tensor ``src`` into ``self`` at the indices specified in the ``index`` Tensor. The indices are specified with respect to the given dimension, ``dim``, in the manner described in gather(). 
:param dim: The axis along which to index :param index: The indices of elements to scatter :param src: The source element(s) to scatter :return: self """ index = _ensure_tensorbase(index) if self.encrypted or index.encrypted: return NotImplemented if index.data.dtype != np.dtype('int_'): raise TypeError("The values of index must be integers") if self.data.ndim != index.data.ndim: raise ValueError( "Index should have the same number of dimensions as output") if dim >= self.data.ndim or dim < -self.data.ndim: raise IndexError("dim is out of range") if dim < 0: # Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter dim = self.data.ndim + dim idx_xsection_shape = index.data.shape[:dim] + \ index.data.shape[dim + 1:] self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:] if idx_xsection_shape != self_xsection_shape: raise ValueError("Except for dimension " + str(dim) + ", all dimensions of index and output should be the same size") if (index.data >= self.data.shape[dim]).any() or (index.data < 0).any(): raise IndexError( "The values of index must be between 0 and (self.data.shape[dim] -1)") def make_slice(arr, dim, i): slc = [slice(None)] * arr.ndim slc[dim] = i return slc # We use index and dim parameters to create idx # idx is in a form that can be used as a NumPy advanced index for scattering of src param. in self.data idx = [[*np.indices(idx_xsection_shape).reshape(index.data.ndim - 1, -1), index.data[make_slice(index.data, dim, i)].reshape(1, -1)[0]] for i in range(index.data.shape[dim])] idx = list(np.concatenate(idx, axis=1)) idx.insert(dim, idx.pop()) if not np.isscalar(src): src = _ensure_tensorbase(src) if index.data.shape[dim] > src.data.shape[dim]: raise IndexError("Dimension " + str(dim) + "of index can not be bigger than that of src ") src_shape = src.data.shape[:dim] + src.data.shape[dim + 1:] if idx_xsection_shape != src_shape: raise ValueError("Except for dimension " + str(dim) + ", all dimensions of index and src should be the same size") # src_idx is a NumPy advanced index for indexing of elements in the src src_idx = list(idx) src_idx.pop(dim) src_idx.insert(dim, np.repeat( np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape))) self.data[idx] = src.data[src_idx] else: self.data[idx] = src return self def gather(self, dim, index): """ Gathers values along an axis specified by ``dim``. 
For a 3-D tensor the output is specified by: out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 :param dim: The axis along which to index :param index: A tensor of indices of elements to gather :return: tensor of gathered values """ index = _ensure_tensorbase(index) if self.encrypted or index.encrypted: return NotImplemented idx_xsection_shape = index.data.shape[:dim] + \ index.data.shape[dim + 1:] self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:] if idx_xsection_shape != self_xsection_shape: raise ValueError("Except for dimension " + str(dim) + ", all dimensions of index and self should be the same size") if index.data.dtype != np.dtype('int_'): raise TypeError("The values of index must be integers") data_swaped = np.swapaxes(self.data, 0, dim) index_swaped = np.swapaxes(index, 0, dim) gathered = np.choose(index_swaped, data_swaped) return TensorBase(np.swapaxes(gathered, 0, dim)) def serialize(self): return pickle.dumps(self) def deserialize(b): return pickle.loads(b) def remainder(self, divisor): """ Computes the element-wise remainder of division. The divisor and dividend may contain both for integer and floating point numbers. The remainder has the same sign as the divisor. When ``divisor`` is a Tensor, the shapes of ``self`` and ``divisor`` must be broadcastable. :param divisor: The divisor. This may be either a number or a tensor. :return: result tensor """ if self.encrypted: return NotImplemented if not np.isscalar(divisor): divisor = _ensure_tensorbase(divisor) return TensorBase(np.remainder(self.data, divisor)) def remainder_(self, divisor): """ Computes the element-wise remainder of division. The divisor and dividend may contain both for integer and floating point numbers. The remainder has the same sign as the divisor. When ``divisor`` is a Tensor, the shapes of ``self`` and ``divisor`` must be broadcastable. :param divisor: The divisor. This may be either a number or a tensor. :return: self """ if self.encrypted: return NotImplemented if not np.isscalar(divisor): divisor = _ensure_tensorbase(divisor) self.data = np.remainder(self.data, divisor) return self def index_select(self, dim, index): """ Returns a new Tensor which indexes the ``input`` Tensor along dimension ``dim`` using the entries in ``index``. :param dim: dimension in which to index :param index: 1D tensor containing the indices to index :return: Tensor of selected indices """ index = _ensure_tensorbase(index) if self.encrypted or index.encrypted: return NotImplemented if index.data.ndim > 1: raise ValueError("Index is supposed to be 1D") return TensorBase(self.data.take(index, axis=dim)) def mv(self, tensorvector): if self.encrypted: raise NotImplemented return mv(self, tensorvector) def masked_scatter_(self, mask, source): """ Copies elements from ``source`` into this tensor at positions where the ``mask`` is true. The shape of ``mask`` must be broadcastable with the shape of the this tensor. The ``source`` should have at least as many elements as the number of ones in ``mask``. 
:param mask: The binary mask (non-zero is treated as true) :param source: The tensor to copy from :return: """ mask = _ensure_tensorbase(mask) source = _ensure_tensorbase(source) if self.encrypted or mask.encrypted or source.encrypted: return NotImplemented mask_self_iter = np.nditer([mask.data, self.data]) source_iter = np.nditer(source.data) out_flat = [s if m == 0 else source_iter.__next__().item() for m, s in mask_self_iter] self.data = np.reshape(out_flat, self.data.shape) return self def masked_fill_(self, mask, value): """ Fills elements of this ``tensor`` with value where ``mask`` is true. The shape of mask must be broadcastable with the shape of the underlying tensor. :param mask: The binary mask (non-zero is treated as true) :param value: value to fill :return: """ mask = _ensure_tensorbase(mask) if self.encrypted or mask.encrypted: return NotImplemented if not np.isscalar(value): raise ValueError("'value' should be scalar") mask_broadcasted = np.broadcast_to(mask.data, self.data.shape) indices = np.where(mask_broadcasted) self.data[indices] = value return self def masked_select(self, mask): """ See :func:`tensor.masked_select` """ return masked_select(self, mask) def eq(self, t): """Returns a new Tensor having boolean True values where an element of the calling tensor is equal to the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" if self.encrypted: return NotImplemented return TensorBase(np.equal(self.data, _ensure_tensorbase(t).data)) def eq_(self, t): """Writes in-place, boolean True values where an element of the calling tensor is equal to the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" if self.encrypted: return NotImplemented self.data = np.equal(self.data, _ensure_tensorbase(t).data) return self def mm(self, tensor2): """Performs a matrix multiplication of :attr:`tensor1` and :attr:`tensor2`. If :attr:`tensor1` is a `n x m` Tensor, :attr:`tensor2` is a `m x p` Tensor, output will be a `n x p` Tensor. Args: tensor1 (Tensor): First Tensor to be multiplied tensor2 (Tensor): Second Tensor to be multiplied""" return syft.mm(self, tensor2) def mv(tensormat, tensorvector): """ matrix and vector multiplication """ if tensormat.encrypted or tensorvector.encrypted: raise NotImplemented elif not len(tensorvector.data.shape) == 1: raise ValueError('Vector dimensions not correct {}'.format( tensorvector.data.shape)) elif tensorvector.data.shape[0] != tensormat.data.shape[1]: raise ValueError('vector dimensions {} not \ compatible with matrix {} '.format(tensorvector.data.shape, tensormat.data.shape)) else: return TensorBase(np.matmul(tensormat.data, tensorvector.data)) def masked_select(tensor, mask): """ Returns a new 1D Tensor which indexes the ``input`` Tensor according to the binary mask ``mask``. The shapes of the ``mask`` tensor and the ``input`` tensor don’t need to match, but they must be broadcastable. :param tensor: Input tensor :param mask: The binary mask (non-zero is treated as true) :return: 1D output tensor """ mask = _ensure_tensorbase(mask) tensor = _ensure_tensorbase(tensor) if tensor.encrypted or mask.encrypted: raise NotImplemented mask_broadcasted, data_broadcasted = np.broadcast_arrays( mask.data, tensor.data) indices = np.where(mask_broadcasted) return TensorBase(data_broadcasted[indices])
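A minimal NumPy-only sketch of the swapaxes/choose indexing that gather() above relies on, shown for dim == 0 so it runs without installing syft; the array values here are made up for illustration.

import numpy as np

data = np.arange(12).reshape(3, 4)            # the tensor being gathered from
index = np.array([[0, 2, 1, 0],
                  [2, 1, 0, 2]])              # entries index into axis 0 of data

dim = 0
data_swapped = np.swapaxes(data, 0, dim)      # no-ops for dim == 0, kept to mirror gather()
index_swapped = np.swapaxes(index, 0, dim)
gathered = np.swapaxes(np.choose(index_swapped, data_swapped), 0, dim)

# Naive definition of the same operation: out[i][j] = data[index[i][j]][j]
expected = np.array([[data[index[i, j], j] for j in range(index.shape[1])]
                     for i in range(index.shape[0])])
assert (gathered == expected).all()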
apache-2.0
-7,033,735,739,887,827,000
37.06155
158
0.608203
false
cloudera/hue
desktop/core/ext-py/tabulate-0.8.9/test/test_input.py
2
15575
# -*- coding: utf-8 -*- """Test support of the various forms of tabular data.""" from __future__ import print_function from __future__ import unicode_literals from tabulate import tabulate from common import assert_equal, assert_in, raises, skip try: from collections import UserDict except ImportError: # Python2 from UserDict import UserDict def test_iterable_of_iterables(): "Input: an interable of iterables." ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)])) expected = "\n".join( ["- - - - -", "0 1 2 3 4", "5 4 3 2 1", "- - - - -"] ) result = tabulate(ii) assert_equal(expected, result) def test_iterable_of_iterables_headers(): "Input: an interable of iterables with headers." ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)])) expected = "\n".join( [ " a b c d e", "--- --- --- --- ---", " 0 1 2 3 4", " 5 4 3 2 1", ] ) result = tabulate(ii, "abcde") assert_equal(expected, result) def test_iterable_of_iterables_firstrow(): "Input: an interable of iterables with the first row as headers" ii = iter(map(lambda x: iter(x), ["abcde", range(5), range(5, 0, -1)])) expected = "\n".join( [ " a b c d e", "--- --- --- --- ---", " 0 1 2 3 4", " 5 4 3 2 1", ] ) result = tabulate(ii, "firstrow") assert_equal(expected, result) def test_list_of_lists(): "Input: a list of lists with headers." ll = [["a", "one", 1], ["b", "two", None]] expected = "\n".join( [ " string number", "-- -------- --------", "a one 1", "b two", ] ) result = tabulate(ll, headers=["string", "number"]) assert_equal(expected, result) def test_list_of_lists_firstrow(): "Input: a list of lists with the first row as headers." ll = [["string", "number"], ["a", "one", 1], ["b", "two", None]] expected = "\n".join( [ " string number", "-- -------- --------", "a one 1", "b two", ] ) result = tabulate(ll, headers="firstrow") assert_equal(expected, result) def test_list_of_lists_keys(): "Input: a list of lists with column indices as headers." ll = [["a", "one", 1], ["b", "two", None]] expected = "\n".join( ["0 1 2", "--- --- ---", "a one 1", "b two"] ) result = tabulate(ll, headers="keys") assert_equal(expected, result) def test_dict_like(): "Input: a dict of iterables with keys as headers." # columns should be padded with None, keys should be used as headers dd = {"a": range(3), "b": range(101, 105)} # keys' order (hence columns' order) is not deterministic in Python 3 # => we have to consider both possible results as valid expected1 = "\n".join( [" a b", "--- ---", " 0 101", " 1 102", " 2 103", " 104"] ) expected2 = "\n".join( [" b a", "--- ---", "101 0", "102 1", "103 2", "104"] ) result = tabulate(dd, "keys") print("Keys' order: %s" % dd.keys()) assert_in(result, [expected1, expected2]) def test_numpy_2d(): "Input: a 2D NumPy array with headers." try: import numpy na = (numpy.arange(1, 10, dtype=numpy.float32).reshape((3, 3)) ** 3) * 0.5 expected = "\n".join( [ " a b c", "----- ----- -----", " 0.5 4 13.5", " 32 62.5 108", "171.5 256 364.5", ] ) result = tabulate(na, ["a", "b", "c"]) assert_equal(expected, result) except ImportError: skip("test_numpy_2d is skipped") def test_numpy_2d_firstrow(): "Input: a 2D NumPy array with the first row as headers." 
try: import numpy na = numpy.arange(1, 10, dtype=numpy.int32).reshape((3, 3)) ** 3 expected = "\n".join( [" 1 8 27", "--- --- ----", " 64 125 216", "343 512 729"] ) result = tabulate(na, headers="firstrow") assert_equal(expected, result) except ImportError: skip("test_numpy_2d_firstrow is skipped") def test_numpy_2d_keys(): "Input: a 2D NumPy array with column indices as headers." try: import numpy na = (numpy.arange(1, 10, dtype=numpy.float32).reshape((3, 3)) ** 3) * 0.5 expected = "\n".join( [ " 0 1 2", "----- ----- -----", " 0.5 4 13.5", " 32 62.5 108", "171.5 256 364.5", ] ) result = tabulate(na, headers="keys") assert_equal(expected, result) except ImportError: skip("test_numpy_2d_keys is skipped") def test_numpy_record_array(): "Input: a 2D NumPy record array without header." try: import numpy na = numpy.asarray( [("Alice", 23, 169.5), ("Bob", 27, 175.0)], dtype={ "names": ["name", "age", "height"], "formats": ["a32", "uint8", "float32"], }, ) expected = "\n".join( [ "----- -- -----", "Alice 23 169.5", "Bob 27 175", "----- -- -----", ] ) result = tabulate(na) assert_equal(expected, result) except ImportError: skip("test_numpy_2d_keys is skipped") def test_numpy_record_array_keys(): "Input: a 2D NumPy record array with column names as headers." try: import numpy na = numpy.asarray( [("Alice", 23, 169.5), ("Bob", 27, 175.0)], dtype={ "names": ["name", "age", "height"], "formats": ["a32", "uint8", "float32"], }, ) expected = "\n".join( [ "name age height", "------ ----- --------", "Alice 23 169.5", "Bob 27 175", ] ) result = tabulate(na, headers="keys") assert_equal(expected, result) except ImportError: skip("test_numpy_2d_keys is skipped") def test_numpy_record_array_headers(): "Input: a 2D NumPy record array with user-supplied headers." try: import numpy na = numpy.asarray( [("Alice", 23, 169.5), ("Bob", 27, 175.0)], dtype={ "names": ["name", "age", "height"], "formats": ["a32", "uint8", "float32"], }, ) expected = "\n".join( [ "person years cm", "-------- ------- -----", "Alice 23 169.5", "Bob 27 175", ] ) result = tabulate(na, headers=["person", "years", "cm"]) assert_equal(expected, result) except ImportError: skip("test_numpy_2d_keys is skipped") def test_pandas(): "Input: a Pandas DataFrame." try: import pandas df = pandas.DataFrame([["one", 1], ["two", None]], index=["a", "b"]) expected = "\n".join( [ " string number", "-- -------- --------", "a one 1", "b two nan", ] ) result = tabulate(df, headers=["string", "number"]) assert_equal(expected, result) except ImportError: skip("test_pandas is skipped") def test_pandas_firstrow(): "Input: a Pandas DataFrame with the first row as headers." try: import pandas df = pandas.DataFrame( [["one", 1], ["two", None]], columns=["string", "number"], index=["a", "b"] ) expected = "\n".join( ["a one 1.0", "--- ----- -----", "b two nan"] ) result = tabulate(df, headers="firstrow") assert_equal(expected, result) except ImportError: skip("test_pandas_firstrow is skipped") def test_pandas_keys(): "Input: a Pandas DataFrame with keys as headers." 
try: import pandas df = pandas.DataFrame( [["one", 1], ["two", None]], columns=["string", "number"], index=["a", "b"] ) expected = "\n".join( [ " string number", "-- -------- --------", "a one 1", "b two nan", ] ) result = tabulate(df, headers="keys") assert_equal(expected, result) except ImportError: skip("test_pandas_keys is skipped") def test_sqlite3(): "Input: an sqlite3 cursor" try: import sqlite3 conn = sqlite3.connect(":memory:") cursor = conn.cursor() cursor.execute("CREATE TABLE people (name, age, height)") for values in [("Alice", 23, 169.5), ("Bob", 27, 175.0)]: cursor.execute("INSERT INTO people VALUES (?, ?, ?)", values) cursor.execute("SELECT name, age, height FROM people ORDER BY name") result = tabulate(cursor, headers=["whom", "how old", "how tall"]) expected = """\ whom how old how tall ------ --------- ---------- Alice 23 169.5 Bob 27 175""" assert_equal(expected, result) except ImportError: skip("test_sqlite3 is skipped") def test_sqlite3_keys(): "Input: an sqlite3 cursor with keys as headers" try: import sqlite3 conn = sqlite3.connect(":memory:") cursor = conn.cursor() cursor.execute("CREATE TABLE people (name, age, height)") for values in [("Alice", 23, 169.5), ("Bob", 27, 175.0)]: cursor.execute("INSERT INTO people VALUES (?, ?, ?)", values) cursor.execute( 'SELECT name "whom", age "how old", height "how tall" FROM people ORDER BY name' ) result = tabulate(cursor, headers="keys") expected = """\ whom how old how tall ------ --------- ---------- Alice 23 169.5 Bob 27 175""" assert_equal(expected, result) except ImportError: skip("test_sqlite3_keys is skipped") def test_list_of_namedtuples(): "Input: a list of named tuples with field names as headers." from collections import namedtuple NT = namedtuple("NT", ["foo", "bar"]) lt = [NT(1, 2), NT(3, 4)] expected = "\n".join(["- -", "1 2", "3 4", "- -"]) result = tabulate(lt) assert_equal(expected, result) def test_list_of_namedtuples_keys(): "Input: a list of named tuples with field names as headers." from collections import namedtuple NT = namedtuple("NT", ["foo", "bar"]) lt = [NT(1, 2), NT(3, 4)] expected = "\n".join( [" foo bar", "----- -----", " 1 2", " 3 4"] ) result = tabulate(lt, headers="keys") assert_equal(expected, result) def test_list_of_dicts(): "Input: a list of dictionaries." lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}] expected1 = "\n".join(["- -", "1 2", "3 4", "- -"]) expected2 = "\n".join(["- -", "2 1", "4 3", "- -"]) result = tabulate(lod) assert_in(result, [expected1, expected2]) def test_list_of_userdicts(): "Input: a list of UserDicts." lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)] expected1 = "\n".join(["- -", "1 2", "3 4", "- -"]) expected2 = "\n".join(["- -", "2 1", "4 3", "- -"]) result = tabulate(lod) assert_in(result, [expected1, expected2]) def test_list_of_dicts_keys(): "Input: a list of dictionaries, with keys as headers." lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}] expected1 = "\n".join( [" foo bar", "----- -----", " 1 2", " 3 4"] ) expected2 = "\n".join( [" bar foo", "----- -----", " 2 1", " 4 3"] ) result = tabulate(lod, headers="keys") assert_in(result, [expected1, expected2]) def test_list_of_userdicts_keys(): "Input: a list of UserDicts." 
lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)] expected1 = "\n".join( [" foo bar", "----- -----", " 1 2", " 3 4"] ) expected2 = "\n".join( [" bar foo", "----- -----", " 2 1", " 4 3"] ) result = tabulate(lod, headers="keys") assert_in(result, [expected1, expected2]) def test_list_of_dicts_with_missing_keys(): "Input: a list of dictionaries, with missing keys." lod = [{"foo": 1}, {"bar": 2}, {"foo": 4, "baz": 3}] expected = "\n".join( [ " foo bar baz", "----- ----- -----", " 1", " 2", " 4 3", ] ) result = tabulate(lod, headers="keys") assert_equal(expected, result) def test_list_of_dicts_firstrow(): "Input: a list of dictionaries, with the first dict as headers." lod = [{"foo": "FOO", "bar": "BAR"}, {"foo": 3, "bar": 4, "baz": 5}] # if some key is missing in the first dict, use the key name instead expected1 = "\n".join( [" FOO BAR baz", "----- ----- -----", " 3 4 5"] ) expected2 = "\n".join( [" BAR FOO baz", "----- ----- -----", " 4 3 5"] ) result = tabulate(lod, headers="firstrow") assert_in(result, [expected1, expected2]) def test_list_of_dicts_with_dict_of_headers(): "Input: a dict of user headers for a list of dicts (issue #23)" table = [{"letters": "ABCDE", "digits": 12345}] headers = {"digits": "DIGITS", "letters": "LETTERS"} expected1 = "\n".join( [" DIGITS LETTERS", "-------- ---------", " 12345 ABCDE"] ) expected2 = "\n".join( ["LETTERS DIGITS", "--------- --------", "ABCDE 12345"] ) result = tabulate(table, headers=headers) assert_in(result, [expected1, expected2]) def test_list_of_dicts_with_list_of_headers(): "Input: ValueError on a list of headers with a list of dicts (issue #23)" table = [{"letters": "ABCDE", "digits": 12345}] headers = ["DIGITS", "LETTERS"] with raises(ValueError): tabulate(table, headers=headers) def test_py27orlater_list_of_ordereddicts(): "Input: a list of OrderedDicts." from collections import OrderedDict od = OrderedDict([("b", 1), ("a", 2)]) lod = [od, od] expected = "\n".join([" b a", "--- ---", " 1 2", " 1 2"]) result = tabulate(lod, headers="keys") assert_equal(expected, result)
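A short usage sketch of the behaviour these tests pin down, assuming only that the tabulate package is installed; for plain dicts the column order follows key order, which is why the tests above accept either ordering.

from tabulate import tabulate

rows = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
print(tabulate(rows, headers="keys"))
# expected shape of the output (default "simple" format):
#   foo    bar
# -----  -----
#     1      2
#     3      4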
apache-2.0
-3,512,393,847,724,832,300
30.313278
92
0.454254
false
ambhas/gw_stream
src/main_model.py
1
10111
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Wed Dec 17 14:25:08 2014 @author: Sat Kumar Tomer @email: satkumartomer@gmail.com @website: www.ambhas.com """ from __future__ import division import numpy as np from ambhas.gis import read_ascii_grid import matplotlib.pyplot as plt from scipy.sparse.linalg import spsolve from scipy.sparse import lil_matrix from matplotlib.mlab import find import datetime from ambhas.time import doy2md, ymd2doy from osgeo.gdalconst import * import gdal import os import csv def ambhas_gw_2d_xy(watershed, hini, T, Sy, dx, dt, hmin, par_discharge, net_recharge): """ this class performs the 2 dimensinoal groundwater modelling in horizonal plane using the 2 dimensional groundwater flow equation The 2D groundwater euqation is solved using generic implicit-explicit method Input: watershed: map of watershed; 1 means inside and 0 means outside watershed hini: initial groundwater level T: transmissivity Sy: specific yield dx: spatial resolution dt: time step hmin: groundwater level corresponding to zero discharge par_discharge: parameter controlling the discharge (range is 0 to 1) net_recharge: net recharge to the groundwater (recharge-groundwater pumping) Output: hnew: groundwater level at next time step discharge: baseflow (volume/time) """ d = dx*1.0 # spatial resolution of the model D = T/Sy #neumann_criterion = 0.5*d**2/D neumann_criterion = 2*D*dt/d**2 #max_neumann_criterion = neumann_criterion.max() # update the model for recharge hini = hini + net_recharge/Sy # take the discharge out from each cell discharge = (1-par_discharge)*(hini-hmin)*Sy discharge[hini<hmin] = 0 hini = hini - discharge/Sy # spatial computing n = int(np.sum(watershed)) foo = np.cumsum(watershed.flatten()) foo.shape = watershed.shape foo[watershed==0] = np.nan foo = foo-1 # indices start from 0 ih, jh = np.unravel_index(find(~np.isnan(foo)), watershed.shape) # indices of non nan alpha = 1.0 # implicit method # setup A and b matrix A = lil_matrix((n, n)) b = np.zeros(n,) for i in xrange(n): a1 = alpha/d**2 a2 = -(4*alpha/d**2 + Sy/T[ih[i],jh[i]]/dt) # i,j A[i,i] = a2 b[i] = (-4*(1-alpha)/d**2 - Sy/T[ih[i],jh[i]]/dt)*hini[ih[i],jh[i]] # i-1,j ind_h = foo[ih[i]-1,jh[i]] if np.isnan(ind_h): A[i,i] = A[i,i] + a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]] else: A[i,int(ind_h)] = a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i]-1,jh[i]] # i+1, j ind_h = foo[ih[i]+1,jh[i]] if np.isnan(ind_h): A[i,i] = A[i,i] + a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]] else: A[i,int(ind_h)] = a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i]+1,jh[i]] # i, j-1 ind_h = foo[ih[i],jh[i]-1] if np.isnan(ind_h): A[i,i] = A[i,i] + a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]] else: A[i,int(ind_h)] = a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]-1] # i, j+1 ind_h = foo[ih[i],jh[i]+1] if np.isnan(ind_h): A[i,i] = A[i,i] + a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]] else: A[i,int(ind_h)] = a1 b[i] = b[i] + (1-alpha)/d**2*hini[ih[i],jh[i]+1] # solve tmp = spsolve(A.tocsr(),b) hnew = np.zeros(watershed.shape) hnew[ih,jh] = tmp hnew[watershed==0] = np.nan return hnew, np.nansum(discharge)*d**2 def ambhas_stream_2d_xy(dem, stream_depth, stream_k, stream_area, stream_m, h_gw): h_stream_bed = dem - stream_depth Ql = (stream_k*stream_area/stream_m)*(h_gw-h_stream_bed) return Ql def run_model(input_file_name): # read input file print('Started reading input file %s'%input_file_name) par = {} with open(input_file_name) as f: for line in f: line = line.split('#', 1)[0] line = line.rstrip() if len(line)>1: key, 
value = line.split() par[key.strip()] = value.strip() start_year = int(par['start_year']) end_year = int(par['end_year']) start_month, start_day = doy2md(int(par['start_doy']), start_year) end_month, end_day = doy2md(int(par['end_doy']), end_year) start_dt = datetime.date(start_year, start_month, start_day) end_dt = datetime.date(end_year, end_month, end_day) n_time = end_dt.toordinal()-start_dt.toordinal()+1 print('Read temporal parameters') hini_file = par['hini_file'] dataset = gdal.Open(hini_file, GA_ReadOnly) hini = dataset.GetRasterBand(1).ReadAsArray() geotransform = dataset.GetGeoTransform() RasterXSize = dataset.RasterXSize RasterYSize = dataset.RasterYSize dataset = None print('Read hini') dem_file = par['dem_file'] dataset = gdal.Open(dem_file, GA_ReadOnly) dem = dataset.GetRasterBand(1).ReadAsArray() dataset = None print('Read DEM') stream_file = par['stream_file'] dataset = gdal.Open(stream_file, GA_ReadOnly) stream = dataset.GetRasterBand(1).ReadAsArray() dataset = None print('Read stream') watershed_file = par['watershed_file'] dataset = gdal.Open(watershed_file, GA_ReadOnly) watershed = dataset.GetRasterBand(1).ReadAsArray() dataset = None watershed[watershed>0] = 1 watershed[np.isnan(watershed)] = 0 watershed[0,:] = 0 watershed[-1,:] = 0 watershed[:,0] = 0 watershed[:,-1] = 0 print('Read watershed') T = np.empty(hini.shape) T[:] = float(par['T']) Sy = float(par['Sy']) hmin = float(par['hmin']) par_discharge = float(par['par_discharge']) recharge_factor = float(par['recharge_factor']) rain_multiplier = float(par['rain_multiplier']) stream_depth = float(par['stream_depth']) stream_k = float(par['stream_k']) stream_m = float(par['stream_m']) print('Read groundwater parameters') print('Finished reading input file %s'%input_file_name) if not os.path.exists(par['h_dir_tif']): os.makedirs(par['h_dir_tif']) if not os.path.exists(par['h_dir_png']): os.makedirs(par['h_dir_png']) if not os.path.exists(par['ql_dir_tif']): os.makedirs(par['ql_dir_tif']) if not os.path.exists(par['ql_dir_png']): os.makedirs(par['ql_dir_png']) dx = geotransform[1] dt = 1.0 stream_area = stream*dx**2 plt.ioff() out_file = par['discharge_file'] with open(out_file, 'w') as the_file: header = ['year', 'month', 'day', 'doy', 'mean_h_gw', 'discharge'] writer = csv.writer(the_file, quotechar='"') writer.writerow(header) for t in range(n_time): t_ordinal = datetime.date.fromordinal(start_dt.toordinal()+t) t_day = t_ordinal.day t_month = t_ordinal.month t_year = t_ordinal.year t_doy = ymd2doy([t_year], [t_month], [t_day])[0] # read rainfall file rain_dir = par['rain_dir'] rain_file = os.path.join(rain_dir, '%i%03d.tif'%(t_year, t_doy)) dataset = gdal.Open(rain_file, GA_ReadOnly) rain = dataset.GetRasterBand(1).ReadAsArray() dataset = None # compute recharge net_recharge = recharge_factor*rain*rain_multiplier # groundwater model h_gw, discharge = ambhas_gw_2d_xy(watershed, hini, T, Sy, dx, dt, hmin, par_discharge, net_recharge) # stream model Ql = ambhas_stream_2d_xy(dem, stream_depth, stream_k, stream_area, stream_m, h_gw) hini = h_gw - Ql/(dx**2) #discharge_stream = np.nansum(Ql) mean_h_gw = np.nanmean(hini[watershed>0]) foo = ['%d'%t_year, '%d'%t_month, '%d'%t_day, '%d'%t_doy, '%.2f'%mean_h_gw, '%.2f'%discharge] writer.writerow(foo) # save the gw level as Gtiff out_file = os.path.join(par['h_dir_tif'], '%i%03d.tif'%(t_year, t_doy)) driver = gdal.GetDriverByName('GTiff') output_dataset = driver.Create(out_file, RasterXSize, RasterYSize, 1, gdal.GDT_Float32) output_dataset.SetGeoTransform(geotransform) 
output_dataset.GetRasterBand(1).WriteArray(hini, 0, 0) output_dataset = None # save the gw level as png plt.matshow(hini) plt.colorbar(shrink=0.7) fig_png = os.path.join(par['h_dir_png'], '%i%03d.png'%(t_year, t_doy)) plt.savefig(fig_png) plt.close() # save the Ql as Gtiff out_file = os.path.join(par['ql_dir_tif'], '%i%03d.tif'%(t_year, t_doy)) driver = gdal.GetDriverByName('GTiff') output_dataset = driver.Create(out_file, RasterXSize, RasterYSize, 1, gdal.GDT_Float32) output_dataset.SetGeoTransform(geotransform) output_dataset.GetRasterBand(1).WriteArray(Ql, 0, 0) output_dataset = None # save the gw level as png plt.matshow(Ql) plt.colorbar(shrink=0.7) fig_png = os.path.join(par['ql_dir_png'], '%i%03d.png'%(t_year, t_doy)) plt.savefig(fig_png) plt.close() print('year=%i, doy=%03d'%(t_year, t_doy)) if __name__ == "__main__": run_model('../input/input.txt')
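The heart of ambhas_gw_2d_xy is assembling a sparse linear system for an implicit step and solving it with spsolve. Below is a hedged, one-dimensional toy of that same pattern; the grid size and coefficients are made up, and this is not the watershed model itself.

import numpy as np
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve

n, dx, dt, D = 50, 1.0, 1.0, 0.25          # cells, spacing, time step, diffusivity (T/Sy)
h_old = np.ones(n)
h_old[n // 2] = 2.0                        # initial mound in the groundwater head

r = D * dt / dx ** 2
A = lil_matrix((n, n))
b = np.zeros(n)
for i in range(n):
    A[i, i] = 1 + 2 * r                    # implicit diffusion stencil on the diagonal
    if i > 0:
        A[i, i - 1] = -r
    if i < n - 1:
        A[i, i + 1] = -r
    b[i] = h_old[i]

h_new = spsolve(A.tocsr(), b)              # same lil_matrix -> tocsr() -> spsolve flow as above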
gpl-2.0
6,412,644,428,528,142,000
32.483444
125
0.544654
false
crcox/NEXT
next/apps/PoolBasedTripletMDS/algs/UncertaintySampling/UncertaintySampling.py
1
5040
""" UncertaintySampling app of the Online Learning Library for Next.Discovery author: Kevin Jamieson, kevin.g.jamieson@gmail.com last updated: 1/17/2015 """ import numpy import numpy.random from next.apps.PoolBasedTripletMDS.algs.UncertaintySampling import utilsMDS from next.apps.PoolBasedTripletMDS.Prototype import PoolBasedTripletMDSPrototype import time class UncertaintySampling(PoolBasedTripletMDSPrototype): def daemonProcess(self,resource,daemon_args_dict): if 'task' in daemon_args_dict and 'args' in daemon_args_dict: task = daemon_args_dict['task'] args = daemon_args_dict['args'] if task == '__full_embedding_update': self.__full_embedding_update(resource,args) elif task == '__incremental_embedding_update': self.__incremental_embedding_update(resource,args) else: return False return True def initExp(self,resource,n,d,failure_probability,params): X = numpy.random.randn(n,d) resource.set('n',n) resource.set('d',d) resource.set('delta',failure_probability) resource.set('X',X.tolist()) return True def getQuery(self,resource): n = resource.get('n') d = resource.get('d') # If number of reported answers is small, generate random to avoid overfitting num_reported_answers = resource.get('num_reported_answers') if num_reported_answers == None: num_reported_answers = 0 R = int(1+d*numpy.log(n)) if num_reported_answers < R*n: a = num_reported_answers/R b = numpy.random.randint(n) while b==a: b = numpy.random.randint(n) c = numpy.random.randint(n) while c==a or c==b: c = numpy.random.randint(n) return a, b, c # generate an active query X = numpy.array(resource.get('X')) # set maximum time allowed to search for a query t_max = 0.05 q,signed_score = utilsMDS.getRandomQuery(X) best_q = q best_score = abs(signed_score) t_start = time.time() while time.time()-t_start<t_max: q,signed_score = utilsMDS.getRandomQuery(X) if abs(signed_score) < best_score: best_q = q best_score = abs(signed_score) index_center = best_q[2] index_left = best_q[0] index_right = best_q[1] return index_center,index_left,index_right def processAnswer(self,resource,index_center,index_left,index_right,index_winner): if index_left==index_winner: q = [index_left,index_right,index_center] else: q = [index_right,index_left,index_center] resource.append_list('S',q) n = resource.get('n') d = resource.get('d') num_reported_answers = resource.increment('num_reported_answers') if num_reported_answers % int(n) == 0: daemon_args_dict = {'task':'__full_embedding_update','args':{}} resource.daemonProcess(daemon_args_dict,time_limit=30) else: daemon_args_dict = {'task':'__incremental_embedding_update','args':{}} resource.daemonProcess(daemon_args_dict,time_limit=5) return True def predict(self,resource): key_value_dict = resource.get_many(['X','num_reported_answers']) X = key_value_dict.get('X',[]) num_reported_answers = key_value_dict.get('num_reported_answers',[]) return X,num_reported_answers def __incremental_embedding_update(self,resource,args): verbose = False n = resource.get('n') d = resource.get('d') S = resource.get_list('S') X = numpy.array(resource.get('X')) # set maximum time allowed to update embedding t_max = 1.0 epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation # take a single gradient step t_start = time.time() X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1,verbose=verbose) k = 1 while (time.time()-t_start<0.5*t_max) and (acc > epsilon): # take a single gradient step X,emp_loss_new,hinge_loss_new,acc = 
utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k,verbose=verbose) k += 1 resource.set('X',X.tolist()) def __full_embedding_update(self,resource,args): verbose = False n = resource.get('n') d = resource.get('d') S = resource.get_list('S') X_old = numpy.array(resource.get('X')) t_max = 5.0 epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation emp_loss_old,hinge_loss_old = utilsMDS.getLoss(X_old,S) X,tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=16,epsilon=0,verbose=verbose) t_start = time.time() X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1,verbose=verbose) k = 1 while (time.time()-t_start<0.5*t_max) and (acc > epsilon): X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k,verbose=verbose) k += 1 emp_loss_new,hinge_loss_new = utilsMDS.getLoss(X,S) if emp_loss_old < emp_loss_new: X = X_old resource.set('X',X.tolist())
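getQuery() above looks for the most ambiguous triplet by sampling random queries until a small time budget runs out and keeping the one with the smallest absolute score. A standalone sketch of that pattern with a stand-in scorer; random_query_score is hypothetical, whereas the real score comes from utilsMDS.getRandomQuery against the current embedding.

import random
import time

def random_query_score():
    # hypothetical stand-in: return (triplet, signed_score)
    return tuple(random.sample(range(30), 3)), random.uniform(-1.0, 1.0)

t_max = 0.05                                   # seconds allowed for the search
best_q, signed = random_query_score()
best_score = abs(signed)
t_start = time.time()
while time.time() - t_start < t_max:
    q, signed = random_query_score()
    if abs(signed) < best_score:               # smaller |score| == more uncertain query
        best_q, best_score = q, abs(signed)
print(best_q, best_score)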
apache-2.0
-2,834,126,474,976,719,400
28.647059
109
0.659524
false
nekia/incubator-superset-dev
superset/utils.py
1
19675
"""Utility functions used across Superset""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import decimal import functools import json import logging import numpy import os import parsedatetime import pytz import smtplib import sqlalchemy as sa import signal import uuid import sys import zlib from builtins import object from datetime import date, datetime, time import celery from dateutil.parser import parse from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.application import MIMEApplication from email.utils import formatdate from flask import flash, Markup, render_template, url_for, redirect, request from flask_appbuilder.const import ( LOGMSG_ERR_SEC_ACCESS_DENIED, FLAMSG_ERR_SEC_ACCESS_DENIED, PERMISSION_PREFIX ) from flask_cache import Cache from flask_appbuilder._compat import as_unicode from flask_babel import gettext as __ import markdown as md from past.builtins import basestring from pydruid.utils.having import Having from sqlalchemy import event, exc, select from sqlalchemy.types import TypeDecorator, TEXT logging.getLogger('MARKDOWN').setLevel(logging.INFO) PY3K = sys.version_info >= (3, 0) EPOCH = datetime(1970, 1, 1) DTTM_ALIAS = '__timestamp' class SupersetException(Exception): pass class SupersetTimeoutException(SupersetException): pass class SupersetSecurityException(SupersetException): pass class MetricPermException(SupersetException): pass class NoDataException(SupersetException): pass class SupersetTemplateException(SupersetException): pass def can_access(sm, permission_name, view_name, user): """Protecting from has_access failing from missing perms/view""" if user.is_anonymous(): return sm.is_item_public(permission_name, view_name) else: return sm._has_view_access(user, permission_name, view_name) def flasher(msg, severity=None): """Flask's flash if available, logging call if not""" try: flash(msg, severity) except RuntimeError: if severity == 'danger': logging.error(msg) else: logging.info(msg) class memoized(object): # noqa """Decorator that caches a function's return value each time it is called If called later with the same arguments, the cached value is returned, and not re-evaluated. """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: value = self.func(*args) self.cache[args] = value return value except TypeError: # uncachable -- for instance, passing a list as an argument. # Better to not cache than to blow up entirely. 
return self.func(*args) def __repr__(self): """Return the function's docstring.""" return self.func.__doc__ def __get__(self, obj, objtype): """Support instance methods.""" return functools.partial(self.__call__, obj) def js_string_to_python(item): return None if item in ('null', 'undefined') else item def string_to_num(s): """Converts a string to an int/float Returns ``None`` if it can't be converted >>> string_to_num('5') 5 >>> string_to_num('5.2') 5.2 >>> string_to_num(10) 10 >>> string_to_num(10.1) 10.1 >>> string_to_num('this is not a string') is None True """ if isinstance(s, (int, float)): return s if s.isdigit(): return int(s) try: return float(s) except ValueError: return None class DimSelector(Having): def __init__(self, **args): # Just a hack to prevent any exceptions Having.__init__(self, type='equalTo', aggregation=None, value=None) self.having = {'having': { 'type': 'dimSelector', 'dimension': args['dimension'], 'value': args['value'], }} def list_minus(l, minus): """Returns l without what is in minus >>> list_minus([1, 2, 3], [2]) [1, 3] """ return [o for o in l if o not in minus] def parse_human_datetime(s): """ Returns ``datetime.datetime`` from human readable strings >>> from datetime import date, timedelta >>> from dateutil.relativedelta import relativedelta >>> parse_human_datetime('2015-04-03') datetime.datetime(2015, 4, 3, 0, 0) >>> parse_human_datetime('2/3/1969') datetime.datetime(1969, 2, 3, 0, 0) >>> parse_human_datetime("now") <= datetime.now() True >>> parse_human_datetime("yesterday") <= datetime.now() True >>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date() True >>> year_ago_1 = parse_human_datetime('one year ago').date() >>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date() >>> year_ago_1 == year_ago_2 True """ try: dttm = parse(s) except Exception: try: cal = parsedatetime.Calendar() parsed_dttm, parsed_flags = cal.parseDT(s) # when time is not extracted, we "reset to midnight" if parsed_flags & 2 == 0: parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0) dttm = dttm_from_timtuple(parsed_dttm.utctimetuple()) except Exception as e: logging.exception(e) raise ValueError("Couldn't parse date string [{}]".format(s)) return dttm def dttm_from_timtuple(d): return datetime( d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec) def parse_human_timedelta(s): """ Returns ``datetime.datetime`` from natural language time deltas >>> parse_human_datetime("now") <= datetime.now() True """ cal = parsedatetime.Calendar() dttm = dttm_from_timtuple(datetime.now().timetuple()) d = cal.parse(s, dttm)[0] d = datetime( d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec) return d - dttm class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = TEXT def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value def datetime_f(dttm): """Formats datetime to take less room when it is recent""" if dttm: dttm = dttm.isoformat() now_iso = datetime.now().isoformat() if now_iso[:10] == dttm[:10]: dttm = dttm[11:] elif now_iso[:4] == dttm[:4]: dttm = dttm[5:] return "<nobr>{}</nobr>".format(dttm) def base_json_conv(obj): if isinstance(obj, numpy.int64): return int(obj) elif isinstance(obj, numpy.bool_): return bool(obj) elif isinstance(obj, set): return list(obj) elif isinstance(obj, decimal.Decimal): return float(obj) elif 
isinstance(obj, uuid.UUID): return str(obj) def json_iso_dttm_ser(obj): """ json serializer that deals with dates >>> dttm = datetime(1970, 1, 1) >>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser) '{"dttm": "1970-01-01T00:00:00"}' """ val = base_json_conv(obj) if val is not None: return val if isinstance(obj, datetime): obj = obj.isoformat() elif isinstance(obj, date): obj = obj.isoformat() elif isinstance(obj, time): obj = obj.isoformat() else: raise TypeError( "Unserializable object {} of type {}".format(obj, type(obj)) ) return obj def datetime_to_epoch(dttm): if dttm.tzinfo: epoch_with_tz = pytz.utc.localize(EPOCH) return (dttm - epoch_with_tz).total_seconds() * 1000 return (dttm - EPOCH).total_seconds() * 1000 def now_as_float(): return datetime_to_epoch(datetime.utcnow()) def json_int_dttm_ser(obj): """json serializer that deals with dates""" val = base_json_conv(obj) if val is not None: return val if isinstance(obj, datetime): obj = datetime_to_epoch(obj) elif isinstance(obj, date): obj = (obj - EPOCH.date()).total_seconds() * 1000 else: raise TypeError( "Unserializable object {} of type {}".format(obj, type(obj)) ) return obj def json_dumps_w_dates(payload): return json.dumps(payload, default=json_int_dttm_ser) def error_msg_from_exception(e): """Translate exception into error message Database have different ways to handle exception. This function attempts to make sense of the exception object and construct a human readable sentence. TODO(bkyryliuk): parse the Presto error message from the connection created via create_engine. engine = create_engine('presto://localhost:3506/silver') - gives an e.message as the str(dict) presto.connect("localhost", port=3506, catalog='silver') - as a dict. The latter version is parsed correctly by this function. """ msg = '' if hasattr(e, 'message'): if type(e.message) is dict: msg = e.message.get('message') elif e.message: msg = "{}".format(e.message) return msg or '{}'.format(e) def markdown(s, markup_wrap=False): s = md.markdown(s or '', [ 'markdown.extensions.tables', 'markdown.extensions.fenced_code', 'markdown.extensions.codehilite', ]) if markup_wrap: s = Markup(s) return s def readfile(file_path): with open(file_path) as f: content = f.read() return content def generic_find_constraint_name(table, columns, referenced, db): """Utility to find a constraint name in alembic migrations""" t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine) for fk in t.foreign_key_constraints: if ( fk.referred_table.name == referenced and set(fk.column_keys) == columns): return fk.name def get_datasource_full_name(database_name, datasource_name, schema=None): if not schema: return "[{}].[{}]".format(database_name, datasource_name) return "[{}].[{}].[{}]".format(database_name, schema, datasource_name) def get_schema_perm(database, schema): if schema: return "[{}].[{}]".format(database, schema) def validate_json(obj): if obj: try: json.loads(obj) except Exception: raise SupersetException("JSON is not valid") def table_has_constraint(table, name, db): """Utility to find a constraint name in alembic migrations""" t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine) for c in t.constraints: if c.name == name: return True return False class timeout(object): """ To be used in a ``with`` block and timeout its content. 
""" def __init__(self, seconds=1, error_message='Timeout'): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): logging.error("Process timed out") raise SupersetTimeoutException(self.error_message) def __enter__(self): try: signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) except ValueError as e: logging.warning("timeout can't be used in the current context") logging.exception(e) def __exit__(self, type, value, traceback): try: signal.alarm(0) except ValueError as e: logging.warning("timeout can't be used in the current context") logging.exception(e) def pessimistic_connection_handling(some_engine): @event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: # "branch" refers to a sub-connection of a connection, # we don't want to bother pinging on these. return # turn off "close with result". This flag is only used with # "connectionless" execution, otherwise will be False in any case save_should_close_with_result = connection.should_close_with_result connection.should_close_with_result = False try: # run a SELECT 1. use a core select() so that # the SELECT of a scalar value without a table is # appropriately formatted for the backend connection.scalar(select([1])) except exc.DBAPIError as err: # catch SQLAlchemy's DBAPIError, which is a wrapper # for the DBAPI's exception. It includes a .connection_invalidated # attribute which specifies if this connection is a "disconnect" # condition, which is based on inspection of the original exception # by the dialect in use. if err.connection_invalidated: # run the same SELECT again - the connection will re-validate # itself and establish a new connection. The disconnect detection # here also causes the whole connection pool to be invalidated # so that all stale connections are discarded. 
connection.scalar(select([1])) else: raise finally: # restore "close with result" connection.should_close_with_result = save_should_close_with_result class QueryStatus(object): """Enum-type class for query statuses""" STOPPED = 'stopped' FAILED = 'failed' PENDING = 'pending' RUNNING = 'running' SCHEDULED = 'scheduled' SUCCESS = 'success' TIMED_OUT = 'timed_out' def notify_user_about_perm_udate( granter, user, role, datasource, tpl_name, config): msg = render_template(tpl_name, granter=granter, user=user, role=role, datasource=datasource) logging.info(msg) subject = __('[Superset] Access to the datasource %(name)s was granted', name=datasource.full_name) send_email_smtp(user.email, subject, msg, config, bcc=granter.email, dryrun=not config.get('EMAIL_NOTIFICATIONS')) def send_email_smtp(to, subject, html_content, config, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed'): """ Send an email with html content, eg: send_email_smtp( 'test@example.com', 'foo', '<b>Foo</b> bar',['/dev/null'], dryrun=True) """ smtp_mail_from = config.get('SMTP_MAIL_FROM') to = get_email_address_list(to) msg = MIMEMultipart(mime_subtype) msg['Subject'] = subject msg['From'] = smtp_mail_from msg['To'] = ", ".join(to) recipients = to if cc: cc = get_email_address_list(cc) msg['CC'] = ", ".join(cc) recipients = recipients + cc if bcc: # don't add bcc in header bcc = get_email_address_list(bcc) recipients = recipients + bcc msg['Date'] = formatdate(localtime=True) mime_text = MIMEText(html_content, 'html') msg.attach(mime_text) for fname in files or []: basename = os.path.basename(fname) with open(fname, "rb") as f: msg.attach(MIMEApplication( f.read(), Content_Disposition='attachment; filename="%s"' % basename, Name=basename )) send_MIME_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun) def send_MIME_email(e_from, e_to, mime_msg, config, dryrun=False): SMTP_HOST = config.get('SMTP_HOST') SMTP_PORT = config.get('SMTP_PORT') SMTP_USER = config.get('SMTP_USER') SMTP_PASSWORD = config.get('SMTP_PASSWORD') SMTP_STARTTLS = config.get('SMTP_STARTTLS') SMTP_SSL = config.get('SMTP_SSL') if not dryrun: s = smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) if SMTP_SSL else \ smtplib.SMTP(SMTP_HOST, SMTP_PORT) if SMTP_STARTTLS: s.starttls() if SMTP_USER and SMTP_PASSWORD: s.login(SMTP_USER, SMTP_PASSWORD) logging.info("Sent an alert email to " + str(e_to)) s.sendmail(e_from, e_to, mime_msg.as_string()) s.quit() else: logging.info('Dryrun enabled, email notification content is below:') logging.info(mime_msg.as_string()) def get_email_address_list(address_string): if isinstance(address_string, basestring): if ',' in address_string: address_string = address_string.split(',') elif ';' in address_string: address_string = address_string.split(';') else: address_string = [address_string] return address_string def has_access(f): """ Use this decorator to enable granular security permissions to your methods. Permissions will be associated to a role, and roles are associated to users. By default the permission's name is the methods name. 
Forked from the flask_appbuilder.security.decorators TODO(bkyryliuk): contribute it back to FAB """ if hasattr(f, '_permission_name'): permission_str = f._permission_name else: permission_str = f.__name__ def wraps(self, *args, **kwargs): permission_str = PERMISSION_PREFIX + f._permission_name if self.appbuilder.sm.has_access( permission_str, self.__class__.__name__): return f(self, *args, **kwargs) else: logging.warning(LOGMSG_ERR_SEC_ACCESS_DENIED.format( permission_str, self.__class__.__name__)) flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED), "danger") # adds next arg to forward to the original path once user is logged in. return redirect(url_for( self.appbuilder.sm.auth_view.__class__.__name__ + ".login", next=request.path)) f._permission_name = permission_str return functools.update_wrapper(wraps, f) def choicify(values): """Takes an iterable and makes an iterable of tuples with it""" return [(v, v) for v in values] def setup_cache(app, cache_config): """Setup the flask-cache on a flask app""" if cache_config and cache_config.get('CACHE_TYPE') != 'null': return Cache(app, config=cache_config) def zlib_compress(data): """ Compress things in a py2/3 safe fashion >>> json_str = '{"test": 1}' >>> blob = zlib_compress(json_str) """ if PY3K: if isinstance(data, str): return zlib.compress(bytes(data, "utf-8")) return zlib.compress(data) return zlib.compress(data) def zlib_decompress_to_string(blob): """ Decompress things to a string in a py2/3 safe fashion >>> json_str = '{"test": 1}' >>> blob = zlib_compress(json_str) >>> got_str = zlib_decompress_to_string(blob) >>> got_str == json_str True """ if PY3K: if isinstance(blob, bytes): decompressed = zlib.decompress(blob) else: decompressed = zlib.decompress(bytes(blob, "utf-8")) return decompressed.decode("utf-8") return zlib.decompress(blob) _celery_app = None def get_celery_app(config): global _celery_app if _celery_app: return _celery_app _celery_app = celery.Celery(config_source=config.get('CELERY_CONFIG')) return _celery_app
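A brief usage sketch for a few of the helpers above, assuming the module is importable as superset.utils per the path in this record; SMTP, caching and the signal-based timeout are not exercised here.

import json
from datetime import datetime

from superset.utils import json_iso_dttm_ser, zlib_compress, zlib_decompress_to_string

payload = {"ts": datetime(2017, 1, 1, 12, 30)}
blob = zlib_compress(json.dumps(payload, default=json_iso_dttm_ser))
print(zlib_decompress_to_string(blob))     # '{"ts": "2017-01-01T12:30:00"}'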
apache-2.0
-583,671,584,770,961,500
28.675716
82
0.616925
false
micahhausler/pandashells
pandashells/test/p_sig_edit_test.py
7
2377
#! /usr/bin/env python
import subprocess
from mock import patch, MagicMock
from unittest import TestCase

import pandas as pd
import numpy as np

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from pandashells.bin.p_sig_edit import (
    main,
)


class MainUnitTest(TestCase):
    @patch('pandashells.bin.p_sig_edit.io_lib')
    @patch('pandashells.bin.p_sig_edit.outlier_lib.sigma_edit_dataframe')
    @patch('pandashells.bin.p_sig_edit.argparse.ArgumentParser')
    def test_from_input_to_output(
            self, arg_parser_mock, sig_edit_mock, io_lib_mock):
        df_in = pd.DataFrame([
            {'a': 1, 'b': 10},
            {'a': 2, 'b': 20},
            {'a': 3, 'b': 30},
            {'a': 4, 'b': 40},
        ])
        args = MagicMock()
        args.sigma_thresh = [3.]
        args.cols = ['a']
        args.max_iter = [20]
        parser = MagicMock(parse_args=MagicMock(return_value=args))
        arg_parser_mock.return_value = parser
        arg_parser_mock.parse_args = args
        io_lib_mock.df_to_output = MagicMock()
        io_lib_mock.df_from_input = MagicMock(return_value=df_in)
        main()
        sig_edit_mock.assert_called_with(3., ['a'], df_in, max_iter=20)
        self.assertTrue(io_lib_mock.df_to_output.called)


class IntegrationTests(TestCase):
    def setUp(self):
        self.df = pd.DataFrame([
            {'a': 1},
            {'a': 2},
            {'a': 1},
            {'a': 2},
            {'a': 1},
            {'a': 2},
            {'a': 1},
            {'a': 2},
            {'a': 6},
        ])

    def get_command_result(self, cmd):
        p = subprocess.Popen(
            ['bash', '-c', cmd],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = p.communicate(
            self.df.to_csv(index=False).encode('utf-8'))
        return stdout.decode('utf-8').strip()

    def test_editing(self):
        cmd = 'p.sig_edit -t 2 -c a'
        df = pd.read_csv(StringIO(self.get_command_result(cmd)))
        self.assertTrue(np.isnan(df.a.iloc[-1]))
        self.assertEqual(len(df.dropna()), 8)

    def test_bad_iter(self):
        cmd = 'p.sig_edit -t 2 -c a --max_iter 0'
        with self.assertRaises(ValueError):
            pd.read_csv(StringIO(self.get_command_result(cmd)))
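Outside the test harness, the integration cases above boil down to piping a one-column CSV through the p.sig_edit command line tool. A rough equivalent, assuming pandashells is installed so that p.sig_edit is on PATH and bash is available:

import subprocess

import pandas as pd

csv_in = pd.DataFrame({'a': [1, 2, 1, 2, 1, 2, 1, 2, 6]}).to_csv(index=False)
proc = subprocess.run(
    ['bash', '-c', 'p.sig_edit -t 2 -c a'],
    input=csv_in.encode('utf-8'),
    stdout=subprocess.PIPE,
)
# the outlying 6 should come back as an empty (NaN) cell in column a
print(proc.stdout.decode('utf-8'))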
bsd-2-clause
734,556,275,100,795,000
28.345679
73
0.545225
false
arashzamani/lstm_nlg_ver1
test_cases/test_v1.py
1
3830
from __future__ import print_function from __future__ import unicode_literals from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.layers import LSTM from keras.optimizers import RMSprop from keras.utils.data_utils import get_file from hazm import * import numpy as np import locale import random import sys import codecs import math import nltk.data import language_parser.word as w f = codecs.open("/home/arash/Downloads/bbc_test.txt", 'r', encoding='utf8') text = f.read() #text = open(path).read() print('corpus length:', len(text)) sents = sent_tokenize(text) words = w.pure_word_tokenize(text) # words = word_tokenize(text) print ('len of words are:', len(words)) unique_words = dict() for i in range(0, len(words), 1): if words[i] in unique_words.keys(): unique_words[words[i]] += 1 else: unique_words[words[i]] = 1 print ("unique words: ", len(unique_words)) thefile = codecs.open("/home/arash/Downloads/unique_words.txt", 'w', encoding='utf8') # for keys in unique_words.keys(): # thefile.write("%s\t%s\n" % keys % unique_words[keys]) # # thefile.close() #in maximum len bayady miangin tedad kalamat jomleh bashe maxlen = 67 step = 3 sentences = [] next_words = [] indices_word = dict((i, c) for i, c in enumerate(unique_words.keys())) for i in range(0, len(words) - maxlen, step): sentences.append(words[i: i + maxlen]) next_words.append(words[i + maxlen]) print('nb sequences:', len(next_words)) print('Vectorization...') X = np.zeros((len(sentences), maxlen, len(unique_words)), dtype=np.bool) y = np.zeros((len(sentences), len(unique_words)), dtype=np.bool) for i, sentence in enumerate(sentences): for t, word in enumerate(sentence): X[i, t, unique_words[word]] = 1 y[i, unique_words[next_words[i]]] = 1 # build the model: a single LSTM print('Build model...') model = Sequential() model.add(LSTM(128, input_shape=(maxlen, len(unique_words)))) model.add(Dense(len(unique_words))) model.add(Activation('softmax')) optimizer = RMSprop(lr=0.01) model.compile(loss='categorical_crossentropy', optimizer=optimizer) def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) # train the model, output generated text after each iteration for iteration in range(1, 20): print() print('-' * 50) print('Iteration', iteration) model.fit(X, y, batch_size=128, nb_epoch=1) start_index = random.randint(0, len(words) - maxlen - 1) for diversity in [0.2, 0.5, 1.0, 1.2]: print() print('----- diversity:', diversity) generated = '' space = u' ' sentence = words[start_index: start_index + maxlen - 1] newSentence = [sentence[0]] for temp in sentence: newSentence.append(space) newSentence.append(temp) for temp in newSentence: generated += temp print('----- Generating with seed: "', newSentence, '"') sys.stdout.write(generated) for i in range(400): x = np.zeros((1, maxlen, len(unique_words))) for t, word in enumerate(sentence): x[0, t, unique_words[word]] = 1. preds = model.predict(x, verbose=0)[0] next_index = sample(preds, diversity) next_word = indices_word[next_index] generated += space + next_word newSentence = newSentence[1:] + [space, next_word] sys.stdout.write(next_word) sys.stdout.write(space) sys.stdout.flush() print()
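One detail worth flagging in the vectorisation above: unique_words maps each word to its count, so using unique_words[word] as a one-hot column index can collide or run past len(unique_words). A hedged sketch of the word-to-index table that step needs; word_indices is a name introduced here, mirroring the existing indices_word.

unique_words = {u'این': 3, u'متن': 1, u'است': 2}   # toy counts, built as in the script
word_indices = dict((c, i) for i, c in enumerate(unique_words.keys()))
indices_word = dict((i, c) for i, c in enumerate(unique_words.keys()))
# the vectorisation and prediction loops would then index with word_indices:
#     X[i, t, word_indices[word]] = 1
#     y[i, word_indices[next_words[i]]] = 1
#     x[0, t, word_indices[word]] = 1.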
gpl-3.0
-6,475,949,534,357,418,000
28.929688
85
0.644648
false
mlperf/training_results_v0.6
Google/benchmarks/gnmt/implementations/tpu-v3-1024-gnmt/nmt/model.py
5
35018
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic sequence-to-sequence model with dynamic RNN support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import numpy as np import tensorflow as tf from tensorflow.python.data.util import nest from tensorflow.python.ops import inplace_ops import beam_search_decoder import decoder import model_helper from utils import misc_utils as utils utils.check_tensorflow_version() __all__ = ["BaseModel"] def dropout(shape, dtype, keep_ratio): """Dropout helper function.""" return tf.math.floor(tf.random.uniform(shape, dtype=dtype) + keep_ratio) / keep_ratio def lstm_cell_split(gate_inputs, c, padding): """Helper function to perform inexpensive activation of lstm cell.""" # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=1) new_c = c * tf.math.sigmoid(f) + tf.math.sigmoid(i) * tf.math.tanh(j) new_h = tf.math.tanh(new_c) * tf.math.sigmoid(o) if padding is not None: new_c = new_c * tf.expand_dims(padding, 1) new_h = new_h * tf.expand_dims(padding, 1) new_state = {"c": new_c, "h": new_h} return new_state def lstm_cell_gate(theta, h, inputs): """Helper function to performan expensive matmul of lstm cell.""" kernel, bias = theta["kernel"], theta["bias"] gate_inputs = tf.matmul(h, kernel) + inputs gate_inputs = tf.nn.bias_add(gate_inputs, bias) return gate_inputs def lstm_cell_grad(theta, state0, inputs, extras, dstate1): """Gradient function for lstm_cell.""" padding = inputs["padding"] if (inputs is not None and "padding" in inputs) else None state1 = nest.flatten(lstm_cell_split(extras, state0["c"], padding)) dstate1 = nest.flatten(dstate1) grad = tf.gradients(state1, [extras], dstate1)[0] dtheta = { "bias": tf.reduce_sum(grad, 0) } dinputs = {"rnn": grad} dstate = {"c": tf.gradients(state1, state0["c"], dstate1)[0]} dstate["h"] = tf.matmul(grad, tf.transpose(theta["kernel"])) if padding is not None: dinputs["padding"] = padding return dtheta, dstate, dinputs def lstm_cell(theta, state, inputs): c, h = state["c"], state["h"] gate_inputs = lstm_cell_gate(theta, h, inputs["rnn"]) padding = inputs["padding"] if "padding" in inputs else None return lstm_cell_split(gate_inputs, c, padding), gate_inputs def attention(theta, new_lstm_state): """Helper function to add attention.""" lstm_output = new_lstm_state["h"] query = tf.expand_dims(tf.matmul(lstm_output, theta["query_kernel"]), 0) normed_v = theta["atten_g"] * theta["atten_v"] * tf.rsqrt( tf.reduce_sum(tf.square(theta["atten_v"]))) score = tf.reduce_sum( normed_v * tf.tanh(theta["keys"] + query + theta["atten_b"]), [2]) score = tf.transpose(score) score = tf.where( theta["seq_mask"] > 0.5, score, tf.ones_like(score) * tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf)) alignments = tf.nn.softmax(score) score = 
tf.transpose(alignments) atten = tf.reduce_sum(tf.expand_dims(score, 2) * theta["values"], 0) new_states = { "attention": atten, "alignments": alignments } for k in new_lstm_state: new_states[k] = new_lstm_state[k] return new_states def attention_cell_grad(theta, state0, unused_inputs, extras, dstate1): """Gradient function for attention_cell.""" new_lstm_state = lstm_cell_split(extras, state0["c"], None) new_states = attention(theta, new_lstm_state) del new_states["alignments"] y = nest.flatten(new_states) x = [extras, state0["c"]] + nest.flatten(theta) dy = nest.flatten(dstate1) g = tf.gradients(y, x, dy) dtheta = nest.pack_sequence_as(theta, g[2:]) grad, dstate_c = g[:2] dtheta["bias"] = tf.reduce_sum(grad, 0) datten = tf.matmul(grad, tf.transpose(theta["attention_kernel"])) dstate_h = tf.matmul(grad, tf.transpose(theta["kernel"])) dstate = { "h": dstate_h, "c": dstate_c, "attention": datten, } return dtheta, dstate, {"rnn": grad} def attention_cell(theta, state, inputs): """Attention cell followed by LSTM cells.""" lstm_input = { "rnn": inputs["rnn"] + tf.matmul(state["attention"], theta["attention_kernel"]) } new_lstm_state, gate = lstm_cell(theta, state, lstm_input) return attention(theta, new_lstm_state), gate def input_projection(inputs, theta, max_seq_len): """Helper function to do projection on inputs.""" max_time, batch_size, _ = inputs.shape factor = tf.maximum(1, 512 // batch_size) factored_batch = batch_size * factor factored_time = max_time // factor n = max_time * batch_size if n < 1024 or n % 512 != 0: return tf.reshape( tf.matmul(tf.reshape(inputs, [n, -1]), theta), [max_time, batch_size, -1]) factored_shape = [factored_time, factored_batch, -1] input_reshape = tf.reshape(inputs, factored_shape) max_length = tf.where( tf.equal(max_seq_len % factor, 0), max_seq_len // factor, max_seq_len // factor + 1) @tf.custom_gradient def _input_projection(inputs, theta): """Function that does projection with early stop.""" def cell_fn(theta, output, i): input_slice = tf.gather(input_reshape, i) output = inplace_ops.alias_inplace_update(output, i, tf.matmul(input_slice, theta)) return theta, output, i + 1 def cell_grad_fn(dtheta, dy, dinput, i): dy_slice = tf.gather(dy, i) input_slice = tf.gather(input_reshape, i) dtheta = dtheta + tf.matmul(tf.transpose(input_slice), dy_slice) dinput = inplace_ops.alias_inplace_update( dinput, i, tf.matmul(dy_slice, tf.transpose(theta))) return dtheta, dy, dinput, i + 1 def grad(dy): dtheta, _, dinput, _ = tf.while_loop( lambda dtheta, dy, dinput, i: i < max_length, cell_grad_fn, [ tf.zeros(theta.shape, dy.dtype), dy, tf.zeros_like(input_reshape), tf.zeros([], tf.int32) ]) return dinput, dtheta output = tf.while_loop(lambda theta, output, i: i < max_length, cell_fn, [ theta, tf.zeros(factored_shape[:2] + [theta.shape[-1]], inputs.dtype), tf.zeros([], tf.int32) ])[1] return output, grad return tf.reshape( _input_projection(input_reshape, theta), [max_time, batch_size, -1]) def build_rnn(orig_theta, state0, orig_inputs, cell_fn, cell_grad, max_length, reverse=False): """Helper function to build an RNN.""" max_time, batch_size = orig_inputs["rnn"].shape.as_list()[:2] skipped_theta = ["kernel", "attention_kernel", "memory_kernel", "seq_mask"] skipped_state = ["alignments"] @tf.custom_gradient def _rnn(*inp): """Function that drives RNN with early stop.""" inputs = nest.pack_sequence_as(orig_inputs, inp[0:len(orig_inputs)]) theta = nest.pack_sequence_as(orig_theta, inp[len(orig_inputs):]) def _cell_fn(theta, state0, acc_state, acc_gate, i): """RNN cell 
function.""" input_slice = {k: tf.gather(inputs[k], i) for k in inputs} state1, gate = cell_fn(theta, state0, input_slice) for k in state0: if k not in skipped_state: acc_state[k] = tf.stop_gradient( inplace_ops.alias_inplace_update(acc_state[k], i, state1[k])) acc_gate = tf.stop_gradient( inplace_ops.alias_inplace_update(acc_gate, i, gate)) return theta, state1, acc_state, acc_gate, i - 1 if reverse else i + 1 def _should_continue(i, is_backward=False): if is_backward: return i < max_length - 1 if reverse else i > 0 else: return i >= 0 if reverse else i < max_length acc_state = { k: tf.zeros([max_time, batch_size, state0["c"].shape[-1]], state0["c"].dtype) for k in state0 if k not in skipped_state } acc_state, acc_gate = tf.while_loop( lambda theta, state0, acc_state, acc_gate, i: _should_continue(i), _cell_fn, [ theta, state0, acc_state, tf.zeros_like(inputs["rnn"]), max_length - 1 if reverse else tf.zeros([], tf.int32) ])[2:4] ret = {"h": acc_state["h"]} if "attention" in acc_state: ret["attention"] = acc_state["attention"] def _cell_grad_fn_with_state0(state0, theta, dy, dstate1, dtheta, dinput, i): """Gradient cell function.""" state0 = { k: tf.stop_gradient(state0[k]) for k in state0 if k not in skipped_state } theta = {k: tf.stop_gradient(theta[k]) for k in theta} if "padding" in inputs: inputs_slice = {"padding": tf.gather(inputs["padding"], i)} else: inputs_slice = None gate = tf.gather(acc_gate, i) for k in dy: dstate1[k] = dstate1[k] + tf.gather(dy[k], i) dt, dstate, di = cell_grad(theta, state0, inputs_slice, gate, dstate1) dtheta = {k: dtheta[k] + dt[k] for k in dtheta if k not in skipped_theta} dinput = { k: inplace_ops.alias_inplace_update(dinput[k], i, di[k]) for k in di } return theta, dy, dstate, dtheta, dinput, i + 1 if reverse else i - 1 def _cell_grad_fn(theta, dy, dstate1, dtheta, dinput, i): """Gradient cell function wrapper.""" return _cell_grad_fn_with_state0( { k: tf.gather(acc_state[k], i + 1 if reverse else i - 1) for k in acc_state }, theta, dy, dstate1, dtheta, dinput, i) def grad(*dy): """Gradient function for build_rnn.""" dy = nest.pack_sequence_as(ret, dy) def _continue(unused_theta, unused_dy, unused_dstate1, unused_dtheta, unused_dinput, i): return _should_continue(i, True) dstate1, dtheta, dinput = tf.while_loop(_continue, _cell_grad_fn, [ theta, dy, { k: tf.zeros_like(state0[k]) for k in state0 if k not in skipped_state }, {k: tf.zeros_like(theta[k]) for k in theta if k not in skipped_theta}, {k: tf.zeros_like(inputs[k]) for k in inputs}, tf.zeros([], tf.int32) if reverse else max_length - 1, ])[2:5] dtheta, dinput = _cell_grad_fn_with_state0( state0, theta, dy, dstate1, dtheta, dinput, max_length - 1 if reverse else tf.zeros([], dtype=tf.int32))[3:5] state0_h = tf.reshape(acc_state["h"], [-1, theta["kernel"].shape[0]]) state0_atten = tf.reshape(acc_state["attention"], [-1, theta["attention_kernel"].shape[0] ]) if "attention_kernel" in theta else None grad = tf.reshape(dinput["rnn"], [-1, theta["kernel"].shape[1]]) if reverse: state0_h = tf.split(state0_h, [batch_size, -1])[1] grad = tf.split(grad, [-1, batch_size])[0] else: if state0_atten is not None: state0_atten = tf.split(state0_atten, [-1, batch_size])[0] state0_h = tf.split(state0_h, [-1, batch_size])[0] grad = tf.split(grad, [batch_size, -1])[1] if state0_atten is not None: dtheta["attention_kernel"] = tf.matmul(tf.transpose(state0_atten), grad) dtheta["kernel"] = tf.matmul(tf.transpose(state0_h), grad) if "memory_kernel" in orig_theta: dtheta["memory_kernel"] = 
tf.zeros_like(orig_theta["memory_kernel"]) dtheta["seq_mask"] = tf.zeros_like(orig_theta["seq_mask"]) return dinput, dtheta return ret, grad return dict( _rnn(*(tuple(nest.flatten(orig_inputs)) + tuple(nest.flatten(orig_theta))))) def build_uni_rnn(inputs, max_seq_len, num_units, name, reverse=False): """Build the uni-directional RNN.""" theta = {} _, batch_size, input_feature_dim = inputs["rnn"].shape dtype = inputs["rnn"].dtype with tf.variable_scope(name, reuse=tf.AUTO_REUSE): theta = { "kernel": tf.get_variable("kernel", [num_units, num_units * 4]), "bias": tf.get_variable("bias", [num_units * 4]) } state0 = { "h": tf.zeros([batch_size, num_units], dtype=dtype), "c": tf.zeros([batch_size, num_units], dtype=dtype) } input_kernel = tf.get_variable("input_kernel", [input_feature_dim, num_units * 4]) inp = {"rnn": input_projection(inputs["rnn"], input_kernel, max_seq_len)} if "padding" in inputs: inp["padding"] = inputs["padding"] output = build_rnn(theta, state0, inp, lstm_cell, lstm_cell_grad, max_seq_len, reverse) return output["h"] def build_bid_rnn(inputs, seq_len, num_units, name): """Build the bi-directional RNN.""" max_seq_len = tf.reduce_max(seq_len) fwd = build_uni_rnn(inputs, max_seq_len, num_units, name + "/fw/cell_fn/basic_lstm_cell", False) bwd_inputs = {k: inputs[k] for k in inputs} bwd_inputs["padding"] = tf.transpose( tf.sequence_mask(seq_len, inputs["rnn"].shape[0], inputs["rnn"].dtype)) bwd = build_uni_rnn(bwd_inputs, max_seq_len, num_units, name + "/bw/cell_fn/basic_lstm_cell", True) return tf.concat([fwd, bwd], -1) def build_atten_rnn(encoder_outputs, src_seq_len, num_units, beam_width, name): """Build the attention decoder RNN.""" dtype = encoder_outputs.dtype max_time, batch_size, input_feature_dim = encoder_outputs.shape if beam_width > 1: encoder_outputs = tf.reshape( tf.tile(encoder_outputs, [1, 1, beam_width]), [max_time, batch_size * beam_width, input_feature_dim]) src_seq_len = tf.reshape( tf.tile(tf.reshape(src_seq_len, [-1, 1]), [1, beam_width]), [-1]) batch_size = batch_size * beam_width with tf.variable_scope("memory_layer", reuse=tf.AUTO_REUSE): memory_kernel = tf.get_variable("kernel", [num_units, num_units]) keys = tf.reshape( tf.matmul( tf.reshape(encoder_outputs, [max_time * batch_size, -1]), memory_kernel), [max_time, batch_size, -1]) seq_mask = tf.sequence_mask(src_seq_len, max_time, dtype) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): theta = [] state0 = [] input_kernels = [] with tf.variable_scope("cell_0_attention/attention"): with tf.variable_scope("cell_fn/basic_lstm_cell"): kernel = tf.get_variable("kernel", [num_units * 1, num_units * 4]) input_kernel = tf.get_variable("input_kernel", [num_units, num_units * 4]) attention_kernel = tf.get_variable("attention_kernel", [num_units, num_units * 4]) bias = tf.get_variable("bias", [num_units * 4]) with tf.variable_scope("bahdanau_attention"): with tf.variable_scope("query_layer"): query_kernel = tf.get_variable("kernel", [num_units, num_units]) input_kernels.append(input_kernel) theta.append({ "kernel": kernel, "attention_kernel": attention_kernel, "bias": bias, "memory_kernel": memory_kernel, "query_kernel": query_kernel, "atten_v": tf.get_variable("attention_v", [num_units]), "atten_g": tf.get_variable( "attention_g", [], initializer=tf.constant_initializer( math.sqrt(1. 
/ num_units))), "atten_b": tf.get_variable( "attention_b", [num_units], initializer=tf.zeros_initializer()), "keys": keys, "values": encoder_outputs, "seq_mask": seq_mask }) state0.append({ "c": tf.zeros([batch_size, num_units], dtype=dtype), "h": tf.zeros([batch_size, num_units], dtype=dtype), "attention": tf.zeros([batch_size, num_units], dtype=dtype), "alignments": tf.zeros([batch_size, max_time], dtype=dtype) }) for i in range(1, 4): with tf.variable_scope("cell_%d/cell_fn/basic_lstm_cell" % i): theta.append({ "kernel": tf.get_variable("kernel", [num_units, num_units * 4]), "bias": tf.get_variable("bias", [num_units * 4]) }) input_kernels.append( tf.get_variable("input_kernel", [num_units * 2, num_units * 4])) state0.append({ "c": tf.zeros([batch_size, num_units], dtype=dtype), "h": tf.zeros([batch_size, num_units], dtype=dtype), }) return theta, input_kernels, state0 class BaseModel(object): """Sequence-to-sequence base class. """ def __init__(self, hparams, mode, features): """Create the model. Args: hparams: Hyperparameter configurations. mode: TRAIN | EVAL | INFER features: a dict of input features. """ # Set params self._set_params_initializer(hparams, mode, features) # Train graph with tf.variable_scope("nmt", reuse=tf.AUTO_REUSE): self.init_embeddings(hparams) res = self.build_graph(hparams) self._set_train_or_infer(res, hparams) def _emb_lookup(self, weight, index, is_decoder=False): return tf.cast( tf.reshape( tf.gather(weight, tf.reshape(index, [-1])), [index.shape[0], index.shape[1], -1]), self.dtype) def _set_params_initializer(self, hparams, mode, features): """Set various params for self and initialize.""" self.mode = mode self.src_vocab_size = hparams.src_vocab_size self.tgt_vocab_size = hparams.tgt_vocab_size self.features = features self.dtype = tf.as_dtype(hparams.activation_dtype) self.single_cell_fn = None # Set num units self.num_units = hparams.num_units self.eos_id = hparams.tgt_eos_id self.label_smoothing = hparams.label_smoothing # Set num layers self.num_encoder_layers = hparams.num_encoder_layers self.num_decoder_layers = hparams.num_decoder_layers assert self.num_encoder_layers assert self.num_decoder_layers # Batch size self.batch_size = tf.size(self.features["source_sequence_length"]) # Global step # Use get_global_step instead of user-defied global steps. Otherwise the # num_train_steps in TPUEstimator.train has no effect (will train forever). # TPUestimator only check if tf.train.get_global_step() < num_train_steps self.global_step = tf.train.get_or_create_global_step() # Initializer self.random_seed = hparams.random_seed initializer = model_helper.get_initializer( hparams.init_op, self.random_seed, hparams.init_weight) tf.get_variable_scope().set_initializer(initializer) # Embeddings self.encoder_emb_lookup_fn = ( self._emb_lookup if self.mode == tf.contrib.learn.ModeKeys.TRAIN else tf.nn.embedding_lookup) def _set_train_or_infer(self, res, hparams): """Set up training.""" if self.mode == tf.contrib.learn.ModeKeys.INFER: self.predicted_ids = res[1] params = tf.trainable_variables() # Gradients and SGD update operation for training the model. # Arrange for the embedding vars to appear at the beginning. 
if self.mode == tf.contrib.learn.ModeKeys.TRAIN: loss = res[0] self.learning_rate = tf.constant(hparams.learning_rate) # warm-up self.learning_rate = self._get_learning_rate_warmup(hparams) # decay self.learning_rate = self._get_learning_rate_decay(hparams) # Optimizer if hparams.optimizer == "sgd": opt = tf.train.GradientDescentOptimizer(self.learning_rate) elif hparams.optimizer == "adam": opt = tf.train.AdamOptimizer(self.learning_rate) else: raise ValueError("Unknown optimizer type %s" % hparams.optimizer) if hparams.use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) # Gradients gradients = tf.gradients(loss, params, colocate_gradients_with_ops=True) clipped_grads, grad_norm = model_helper.gradient_clip( gradients, max_gradient_norm=hparams.max_gradient_norm) self.grad_norm = grad_norm self.update = opt.apply_gradients( zip(clipped_grads, params), global_step=self.global_step) # Print trainable variables utils.print_out("# Trainable variables") utils.print_out("Format: <name>, <shape>, <(soft) device placement>") for param in params: utils.print_out(" %s, %s, %s" % (param.name, str(param.get_shape()), param.op.device)) def _get_learning_rate_warmup(self, hparams): """Get learning rate warmup.""" warmup_steps = hparams.warmup_steps warmup_scheme = hparams.warmup_scheme utils.print_out(" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s" % (hparams.learning_rate, warmup_steps, warmup_scheme)) # Apply inverse decay if global steps less than warmup steps. # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3) # When step < warmup_steps, # learing_rate *= warmup_factor ** (warmup_steps - step) if warmup_scheme == "t2t": # 0.01^(1/warmup_steps): we start with a lr, 100 times smaller warmup_factor = tf.exp(tf.log(0.01) / warmup_steps) inv_decay = warmup_factor**(tf.to_float(warmup_steps - self.global_step)) else: raise ValueError("Unknown warmup scheme %s" % warmup_scheme) return tf.cond( self.global_step < hparams.warmup_steps, lambda: inv_decay * self.learning_rate, lambda: self.learning_rate, name="learning_rate_warump_cond") def _get_learning_rate_decay(self, hparams): """Get learning rate decay.""" return tf.cond( self.global_step < hparams.decay_start, lambda: self.learning_rate, lambda: tf.maximum( # pylint: disable=g-long-lambda tf.train.exponential_decay( self.learning_rate, self.global_step - hparams.decay_start, hparams.decay_interval, hparams.decay_factor, staircase=True), self.learning_rate * tf.pow(hparams.decay_factor, hparams. decay_steps)), name="learning_rate_decay_cond") def init_embeddings(self, hparams): """Init embeddings.""" self.embedding_encoder, self.embedding_decoder = ( model_helper.create_emb_for_encoder_and_decoder( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, src_embed_size=self.num_units, tgt_embed_size=self.num_units, num_enc_partitions=hparams.num_enc_emb_partitions, num_dec_partitions=hparams.num_dec_emb_partitions, src_vocab_file=hparams.src_vocab_file, tgt_vocab_file=hparams.tgt_vocab_file, src_embed_file=hparams.src_embed_file, tgt_embed_file=hparams.tgt_embed_file, )) def _build_model(self, hparams): """Builds a sequence-to-sequence model. Args: hparams: Hyperparameter configurations. Returns: For infrence, A tuple of the form (logits, decoder_cell_outputs, predicted_ids), where: logits: logits output of the decoder. decoder_cell_outputs: the output of decoder. predicted_ids: predicted ids from beam search. 
For training, returns the final loss """ # Encoder self.encoder_outputs = self._build_encoder(hparams) ## Decoder return self._build_decoder(self.encoder_outputs, hparams) def build_graph(self, hparams): """Subclass must implement this method. Creates a sequence-to-sequence model with dynamic RNN decoder API. Args: hparams: Hyperparameter configurations. Returns: A tuple of the form (logits, predicted_ids) for infererence and (loss, None) for training. where: logits: float32 Tensor [batch_size x num_decoder_symbols] loss: float32 scalar predicted_ids: predicted ids from beam search. """ utils.print_out("# Creating %s graph ..." % self.mode) # Projection with tf.variable_scope("build_network"): with tf.variable_scope("decoder/output_projection", reuse=tf.AUTO_REUSE): self.output_layer = tf.get_variable( "output_projection", [self.num_units, self.tgt_vocab_size]) with tf.variable_scope( "dynamic_seq2seq", dtype=self.dtype, reuse=tf.AUTO_REUSE): if hparams.activation_dtype == "bfloat16": tf.get_variable_scope().set_custom_getter( utils.bfloat16_var_getter if hparams.activation_dtype == "bfloat16" else None) logits_or_loss, decoder_cell_outputs, predicted_ids = self._build_model( hparams) if decoder_cell_outputs is not None: decoder_cell_outputs = tf.cast(decoder_cell_outputs, tf.float32) else: logits_or_loss, decoder_cell_outputs, predicted_ids = self._build_model( hparams) return logits_or_loss, predicted_ids def _get_infer_maximum_iterations(self, hparams, source_sequence_length): """Maximum decoding steps at inference time.""" if hparams.tgt_max_len_infer: maximum_iterations = hparams.tgt_max_len_infer utils.print_out(" decoding maximum_iterations %d" % maximum_iterations) else: decoding_length_factor = 2.0 max_encoder_length = tf.reduce_max(source_sequence_length) maximum_iterations = tf.to_int32( tf.round(tf.to_float(max_encoder_length) * decoding_length_factor)) return maximum_iterations def _compute_loss(self, theta, inputs, factored_batch_size=None): """Final projection layer and computes the loss.""" logits = tf.cast( tf.matmul(tf.cast(inputs[0], theta.dtype), theta), tf.float32) if factored_batch_size is not None: logits.set_shape([factored_batch_size, self.tgt_vocab_size]) target = tf.cast(tf.reshape(inputs[1], [-1]), tf.int32) crossent = tf.losses.softmax_cross_entropy( tf.one_hot(target, self.tgt_vocab_size, dtype=logits.dtype), logits, label_smoothing=self.label_smoothing, reduction=tf.losses.Reduction.NONE) crossent = tf.where(target == self.eos_id, tf.zeros_like(crossent), crossent) return tf.reshape(crossent, [-1]), [] def _build_decoder(self, encoder_outputs, hparams): """Build and run a RNN decoder with a final projection layer. Args: encoder_outputs: The outputs of encoder for every time step. hparams: The Hyperparameters configurations. Returns: For inference, A tuple of final logits and final decoder state: logits: size [time, batch_size, vocab_size] For training, returns the final loss """ ## Decoder. with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE) as decoder_scope: # Optional ops depends on which mode we are in and which loss function we # are using. 
logits = tf.no_op() decoder_cell_outputs = None if self.mode == tf.contrib.learn.ModeKeys.TRAIN: beam_width = 1 else: beam_width = hparams.beam_width theta, input_kernels, state0 = build_atten_rnn( encoder_outputs, self.features["source_sequence_length"], hparams.num_units, beam_width, "multi_rnn_cell") ## Train or eval if self.mode != tf.contrib.learn.ModeKeys.INFER: # decoder_emp_inp: [max_time, batch_size, num_units] target_input = self.features["target_input"] batch_size, max_time = target_input.shape target_input = tf.transpose(target_input) decoder_emb_inp = self._emb_lookup( self.embedding_decoder, target_input, is_decoder=True) seq_len = self.features["target_sequence_length"] padding = tf.transpose( tf.sequence_mask(seq_len, target_input.shape[0], decoder_emb_inp.dtype)) max_seq_len = tf.reduce_max(seq_len) o = decoder_emb_inp if self.mode == tf.contrib.learn.ModeKeys.TRAIN: o = o * dropout(o.shape, o.dtype, 1.0 - hparams.dropout) inp = {"rnn": input_projection(o, input_kernels[0], max_seq_len)} new_states = build_rnn(theta[0], state0[0], inp, attention_cell, attention_cell_grad, max_seq_len) attention_state = new_states["attention"] o = new_states["h"] for i in range(1, 4): c = tf.concat([o, attention_state], -1) if self.mode == tf.contrib.learn.ModeKeys.TRAIN: c = c * dropout(c.shape, c.dtype, 1.0 - hparams.dropout) inp = {"rnn": input_projection(c, input_kernels[i], max_seq_len)} out = build_rnn(theta[i], state0[i], inp, lstm_cell, lstm_cell_grad, max_seq_len) o = out["h"] + o if i > 1 else out["h"] out = o * tf.expand_dims(padding, 2) if batch_size * max_time < 1024: return tf.reduce_sum( self._compute_loss(self.output_layer, [ tf.reshape(out, [-1, self.num_units]), tf.transpose(self.features["target_output"]) ])[0]), None, None # 512 batch dimension yields best tpu efficiency. factor = tf.maximum(1, 512 // self.batch_size) factored_batch = self.batch_size * factor input1 = tf.reshape(out, [-1, factored_batch, self.num_units]) input2 = tf.reshape( tf.transpose(self.features["target_output"]), [-1, factored_batch, 1]) max_length = tf.reduce_max(self.features["target_sequence_length"]) max_length = tf.where( tf.equal(max_length % factor, 0), max_length // factor, max_length // factor + 1) inputs = [input1, input2] def _cell_fn(theta, _, state): return self._compute_loss(theta, state, 512) loss, _ = tf.contrib.recurrent.Recurrent( theta=self.output_layer, state0=tf.zeros([512], tf.float32), inputs=inputs, cell_fn=_cell_fn, max_input_length=max_length, use_tpu=True) return tf.reduce_sum(loss), None, None ## Inference else: assert hparams.infer_mode == "beam_search" start_tokens = tf.fill([self.batch_size], hparams.tgt_sos_id) end_token = hparams.tgt_eos_id beam_width = hparams.beam_width length_penalty_weight = hparams.length_penalty_weight coverage_penalty_weight = hparams.coverage_penalty_weight # maximum_iteration: The maximum decoding steps. 
maximum_iterations = self._get_infer_maximum_iterations( hparams, self.features["source_sequence_length"]) def cell_fn(inputs, state): """Cell function used in decoder.""" inp = {"rnn": tf.matmul(inputs, input_kernels[0])} atten_state, _ = attention_cell(theta[0], state[0], inp) o = atten_state["h"] new_states = [atten_state] for i in range(1, 4): ns, _ = lstm_cell( theta[i], state[i], { "rnn": tf.matmul( tf.concat([o, atten_state["attention"]], -1), input_kernels[i]) }) new_states.append(ns) if i > 1: o = ns["h"] + o else: o = ns["h"] return new_states, o my_decoder = beam_search_decoder.BeamSearchDecoder( cell=cell_fn, embedding=self.embedding_decoder, start_tokens=start_tokens, end_token=end_token, initial_state=state0, beam_width=beam_width, output_layer=self.output_layer, max_tgt=maximum_iterations, length_penalty_weight=length_penalty_weight, coverage_penalty_weight=coverage_penalty_weight, dtype=self.dtype) # Dynamic decoding predicted_ids = decoder.dynamic_decode( my_decoder, maximum_iterations=maximum_iterations, swap_memory=True, scope=decoder_scope) return logits, decoder_cell_outputs, predicted_ids def _prepare_beam_search_decoder_inputs(self, beam_width, memory, source_sequence_length): memory = tf.contrib.seq2seq.tile_batch(memory, multiplier=beam_width) source_sequence_length = tf.contrib.seq2seq.tile_batch( source_sequence_length, multiplier=beam_width) batch_size = self.batch_size * beam_width return memory, source_sequence_length, batch_size def _build_encoder(self, hparams): """Build a GNMT encoder.""" source = self.features["source"] source = tf.transpose(source) with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE): emb = tf.cast( self.encoder_emb_lookup_fn(self.embedding_encoder, source), self.dtype) seq_len = self.features["source_sequence_length"] padding = tf.transpose( tf.sequence_mask(seq_len, emb.shape[0], self.dtype)) max_seq_len = tf.reduce_max(seq_len) if self.mode == tf.contrib.learn.ModeKeys.TRAIN: emb = emb * dropout(emb.shape, emb.dtype, 1.0 - hparams.dropout) out = build_bid_rnn({"rnn": emb}, seq_len, hparams.num_units, "bidirectional_rnn") out = out * tf.expand_dims(padding, 2) for i in range(3): orig_out = out if self.mode == tf.contrib.learn.ModeKeys.TRAIN: out = out * dropout(out.shape, emb.dtype, 1.0 - hparams.dropout) inputs = {"rnn": out} o = build_uni_rnn(inputs, max_seq_len, hparams.num_units, "rnn/uni_rnn_cell_%d" % i) if i > 0: o = o + orig_out out = o out = out * tf.expand_dims(padding, 2) return out
apache-2.0
-7,594,507,182,426,884,000
36.898268
80
0.600034
false
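The `_get_learning_rate_warmup` docstring in the file above spells out the "t2t" warmup rule: for step < warmup_steps, learning_rate *= warmup_factor ** (warmup_steps - step) with warmup_factor = 0.01 ** (1 / warmup_steps), so training starts at a rate roughly 100 times smaller and reaches the base rate at warmup_steps. A minimal pure-Python sketch of that schedule, assuming nothing beyond the docstring (the function name and the example numbers are illustrative, not part of the model code):

import math

def t2t_warmup_lr(base_lr, step, warmup_steps):
    # Inverse-exponential warmup: ~base_lr/100 at step 0, base_lr at warmup_steps.
    if step >= warmup_steps:
        return base_lr
    warmup_factor = math.exp(math.log(0.01) / warmup_steps)  # 0.01 ** (1/warmup_steps)
    return base_lr * warmup_factor ** (warmup_steps - step)

# For example, with base_lr=1e-3 and warmup_steps=200:
#   t2t_warmup_lr(1e-3, 0, 200)   -> ~1e-5
#   t2t_warmup_lr(1e-3, 200, 200) -> 1e-3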
orbkit/orbkit
examples/orbkit_applications/H2+_stefd/h2+_stefd.py
1
1513
# Import the functions of orbkit
from orbkit import grid, read, core, output, display
# Import general modules
import numpy
try:
    from enthought.mayavi import mlab
except ImportError:
    from mayavi import mlab

# Name of the input file
fid_in = 'h2+.molden'

# Number of processes and points per process
numproc = 2
slice_length = 1e4

# Open molden file and read parameters
qc = read.main_read(fid_in, itype='molden', all_mo=True)

# Set grid parameters
grid.adjust_to_geo(qc, extend=5.0, step=0.5)
# Initialize grid
grid.grid_init()
# Print grid information
display.display(grid.get_grid())

# Choose the molecular orbitals to be calculated
selected_MO = ['1.1 alpha', '1.5 alpha']
qc.mo_spec = qc.mo_spec.select(selected_MO)

# Calculate molecular orbitals
mo_list = core.rho_compute(qc, calc_mo=True, slice_length=slice_length,
                           drv=None, numproc=numproc)

# Calculate analytic derivatives of the molecular orbitals
mo_list_drv = core.rho_compute(qc, calc_mo=True, slice_length=slice_length,
                               drv='xyz', numproc=numproc)

# Calculate the transition electronic flux density (time independent),
# i.e. the STEFD [in units of (i E_h/(hbar a_0^2))]
j_stefd = -0.5 * (mo_list[numpy.newaxis, 0] * mo_list_drv[:, 1]
                  - mo_list[numpy.newaxis, 1] * mo_list_drv[:, 0])

Z, Y, X = output.meshgrid2(*grid.tolist()[::-1])
mlab.quiver3d(X, Y, Z, *j_stefd)
mlab.show()
lgpl-3.0
3,199,154,708,520,079,000
28.096154
83
0.691342
false
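The script above combines the two selected orbitals and their analytic gradients into the STEFD via j = -1/2 (phi_0 * grad(phi_1) - phi_1 * grad(phi_0)). A small NumPy sketch of just that antisymmetric combination on toy arrays; the shapes mimic the (orbital, grid) and (derivative, orbital, grid) layout used above, and the random data is purely illustrative:

import numpy as np

rng = np.random.default_rng(0)
mo = rng.random((2, 10))          # 2 orbitals sampled on 10 grid points
mo_drv = rng.random((3, 2, 10))   # d/dx, d/dy, d/dz of each orbital on the same grid

# j = -1/2 (phi_0 * grad(phi_1) - phi_1 * grad(phi_0)), one vector per grid point
j_stefd = -0.5 * (mo[np.newaxis, 0] * mo_drv[:, 1]
                  - mo[np.newaxis, 1] * mo_drv[:, 0])
print(j_stefd.shape)  # (3, 10)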
JonasWallin/logisticnormal
logisticnormal/priors.py
1
9018
''' Created on Jul 2, 2014 @author: jonaswallin ''' import numpy as np import numpy.random as npr import pickle from .distribution_cython import invWishart, Multivariatenormal, Wishart, MultivariatenormalScalingCython # @UnresolvedImport from .PurePython.priors import nu_class # make it so that that invWishart returns sigma,Q, logdet(sigma) class normal_p_wishart(object): """ prior class for \mu with parameter: \mu \sim N(\theta , \Sigma_\mu) \theta \sim N(\theta_0, \Sigma_\theta) \Sigma_\mu \sim IW(Q_\mu, \nu_\mu) methods for sampling \theta, \Sigma_mu given \mu, theta_0, Q_\mu, \nu_\mu """ def __init__(self, prior=None, param=None): """ prior dict prior['theta'] -> dict ['mu'] - np.array(dim=1) \theta_0 ['Sigma'] - np.array(dim=2) \Sigma_\theta prior['Sigma'] -> dict ['nu'] - int ['Q'] - np.array(dim=2) param dict param['theta'] -> dict ['Sigma'] param['Sigma'] -> dict ['theta'] """ self.theta_class = Multivariatenormal() self.Sigma_class = invWishart() self.param = {} if not prior is None: self.set_prior(prior) if not param is None: self.set_parameter(param) def set_prior(self, prior): """ see init """ self.theta_class.set_prior(prior) self.Sigma_class.set_prior(prior) def set_prior_param0(self, d): """ setting deafult "non informative" priors + starting values """ self.theta_class.set_prior0(d) self.Sigma_class.set_prior0(d) param = {} param['theta'] = np.zeros(d) param['Sigma'] = np.eye(d) self.set_parameter(param) def set_parameter(self, param): """ see init """ self.theta_class.set_parameter(param) self.Sigma_class.set_parameter(param) self.param['theta'] = np.zeros_like(param['theta']) self.param['Sigma'] = np.zeros_like(param['Sigma']) self.param['theta'][:] = param['theta'][:] self.param['Sigma'][:] = param['Sigma'][:] def set_data(self, data): """ mu obeservations data - np.array[dim = 2] """ self.theta_class.set_data(data) self.Sigma_class.set_data(data, self.theta_class.sumY) def sample(self): """ Sampling \theta, \Sigma returns: dict with ['theta'] ['Sigma'] """ self.param['theta'][:] = self.theta_class.sample()[:] self.Sigma_class.set_parameter(self.param) self.param['Sigma'][:] = self.Sigma_class.sample()[:] self.theta_class.set_parameter(self.param) def pickle(self, filename): """ store object in file """ f = open(filename, 'wb') pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) f.close() @staticmethod def unpickle(filename): """ load object from file use: object = normal_p_wishart.unpickle(filename) """ with open(filename, 'rb') as f: return pickle.load(f) class Wishart_p_nu(object): """ Wishart plus nu prior prior class for \Sigma with parameter: \Sigma \sim IW(Q , \nu) Q \sim W(Q_s, \nu_s) \nu \sim 1 methods for sampling \nu, Q given Q_s, {\Sigma} \nu_s """ def __init__(self, prior=None, param=None, AMCMC=False): """ prior dict prior['nu'] -> None prior['Q'] -> dict ['nus'] - int ['Qs'] - np.array(dim=2) param dict param['nu'] -> dict ['Q'] param['Q'] -> dict ['nu'] """ self.nu_class = nu_class(AMCMC=AMCMC) self.Q_class = Wishart() self.param = {} if not prior is None: self.set_prior(prior) if not param is None: self.set_parameter(param) def set_MH_param(self, sigma=5, iterations=5): """ setting the parametet for the MH algorithm for the nu class sigma - the sigma in the MH algorihm on the Natural line iteration - number of time to sample using the MH algortihm """ self.nu_class.set_MH_param(sigma, iterations) def set_prior(self, prior): self.nu_class.set_prior(prior) self.Q_class.set_prior(prior) def set_prior_param0(self, d): """ setting deafult "non informative" priors + 
starting values """ self.Q_class.set_prior0(d) param = {} param['nu'] = d param['Q'] = np.eye(d) self.set_parameter(param) def set_val(self, param): """ setting the current iteration to param para - dict keys: ['nu'] ['Q'] """ self.param = param self.Q_class.set_parameter(self.param) self.nu_class.set_parameter(self.param) self.nu_class.set_val(param['nu']) def set_parameter(self, param): self.nu_class.set_parameter(param) self.Q_class.set_parameter(param) self.nu_class.set_d(self.Q_class.d) self.param['nu'] = param['nu'] self.param['Q'] = np.zeros_like(param['Q']) self.param['Q'][:] = param['Q'] def set_data(self, Sigmas=None, Qs=None, detQ=None): """ Sigma obeservations data - list of np.array[dim = 2] """ if Qs is None: self.Q_class.set_data(Sigmas=Sigmas) self.nu_class.set_data(data=Sigmas) def sample(self): """ Sampling \nu,Q returns: dict with ['nu'] ['Q'] """ self.param['nu'] = self.nu_class.sample() self.Q_class.set_parameter(self.param) self.param['Q'] = self.Q_class.sample() self.nu_class.set_parameter(self.param) def pickle(self, filename): """ store writte """ f = open(filename, 'wb') pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) f.close() @staticmethod def unpickle(filename): """ load class object = Wishart_p_nu.unpickle(filename) """ with open(filename, 'rb') as f: return pickle.load(f) class MultivariatenormalScaling(object): """ Class for sampling posterior distribution of scaling of covaraince matrix for multivariate normal The model is: X \sim N( \mu, \Sigma) Y_i \sim N( 0, D(exp(B_i * X)) \Sigma_{Y,i} D(exp(B_i * X))) """ def __init__(self, priors = None): self.cythonObj = MultivariatenormalScalingCython(priors) self.X = None self.sigma_MCMC = 1. def setprior(self, priors ): self.cythonObj.setprior(priors) def setprior0(self, d): self.cythonObj.setprior0(d) def setB(self, B): """ sets the regression coeff, typically fixed in regression models B - (n x d x k) numpy vector, the covariates k - dimension of beta """ self.cythonObj.setB(B) def setSigmaY(self, SigmaY): """ sets the unscaled covaraince matrix of the residuals sigmaY - (n x d x d) numpy vector, the covariance of residuals (y-B * X) """ self.cythonObj.setSigmaY(SigmaY) def setQY(self, QY): """ sets the unscaled inverse percision matrix for the residauls QY - (n x d x d) inverse of the covariance matrix """ self.cythonObj.setQY( QY) def setQYaSigmaY(self, SigmaY, QY): """ sets the unscaled inverse percision matrix for the residauls and covariance jointly """ self.cythonObj.setQYaSigmaY(SigmaY, QY) def setY(self, Y): """ Y - (nxd) numpy vector , the data where n number of observation, d - dimension of data """ self.cythonObj.setY( Y) def setData(self, Y = None, SigmaY = None, B = None, QY = None): """ if QY is given SigmaY is not used Y - (nxd) numpy vector , the data where n number of observation, d - dimension of data SigmaY - (n x d x d ) numpy vector, the covariance of residuals (y-B * X) B - (n x d x k ) numpy vector, the covariates k - dimension of beta QY - (n x d x d ) the inverses of SimgaY """ self.cythonObj.setData( Y, SigmaY, B, QY) def loglik(self, X): """ computing the loglikelihood for model defined in description X - (d x 1) the covariates """ return self.cythonObj.loglik( X) def setX(self, X): self.X = np.zeros_like(X) self.X[:] = X[:] self.d = len(X) self.sigma_MCMC = .2348 / (self.d**(1./6)) def _HMC_objs(self, X): """ computes the objects needed to sample from the posterior X - (d x 1) the coeffients """ lik = self.cythonObj.hesslik(X) grad = self.cythonObj.grad Hess = self.cythonObj.Hessian L = 
np.linalg.cholesky( - Hess) Lg = np.linalg.solve(L, grad) LtLg = np.linalg.solve(L.T, Lg) mu = X + ( self.sigma_MCMC**2 * 0.5) * LtLg return lik, mu, Hess, L def sample(self, z = None): """ Sampling using AMCMC MALA with preconditioner as Hessian z - (d x 1) the random values used in MCMC (mainly used for debuging) """ if z is None: z = npr.randn(self.d) if self.X is None: raise Exception('Needs a start value use .setX') lik_old, mu_old, Hess_old, L_old = self._HMC_objs( self.X) Xs = mu_old + np.linalg.solve(L_old.T, self.sigma_MCMC * z) # sampling new realization lik_star, mu_star, Hess_star,_ = self._HMC_objs( Xs) res_old = self.X - mu_star res = Xs - mu_old q_star = -(0.5/self.sigma_MCMC**2) * np.dot( res, np.dot(-Hess_old, res) ) q_old = -(0.5/self.sigma_MCMC**2) * np.dot( res_old, np.dot(-Hess_star , res_old) ) U = np.random.rand(1) if np.log(U) < lik_star - lik_old + q_old - q_star: self.X = Xs X_out = np.zeros_like(self.X) X_out[:] = self.X[:] return X_out
gpl-3.0
2,263,432,541,963,474,700
22.302326
126
0.620537
false
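MultivariatenormalScaling.sample in the file above is a preconditioned MALA update: it proposes from a Langevin step around the current point and accepts when log U < lik_star - lik_old + q_old - q_star. A generic, unpreconditioned NumPy sketch of that accept/reject rule on an arbitrary log-density; this is a simplified stand-in, not the class's Hessian-preconditioned version, and the function names and the standard-normal example are illustrative only:

import numpy as np

def mala_step(x, loglik, grad, step, rng):
    # Langevin proposal: the mean is a half gradient step from the current point.
    mu_x = x + 0.5 * step**2 * grad(x)
    prop = mu_x + step * rng.standard_normal(x.shape)
    mu_p = prop + 0.5 * step**2 * grad(prop)
    # log q(x | prop) - log q(prop | x) for the Gaussian proposal kernels.
    log_q = (np.sum((prop - mu_x)**2) - np.sum((x - mu_p)**2)) / (2.0 * step**2)
    if np.log(rng.random()) < loglik(prop) - loglik(x) + log_q:
        return prop
    return x

# Example: 100 steps targeting a 3-d standard normal.
rng = np.random.default_rng(1)
x = np.zeros(3)
for _ in range(100):
    x = mala_step(x, lambda v: -0.5 * v @ v, lambda v: -v, 0.5, rng)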
MJuddBooth/pandas
pandas/tests/test_algos.py
1
72406
# -*- coding: utf-8 -*- from datetime import datetime from itertools import permutations import struct import numpy as np from numpy import nan from numpy.random import RandomState import pytest from pandas._libs import ( algos as libalgos, groupby as libgroupby, hashtable as ht) from pandas.compat import PY2, lrange, range from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td from pandas.core.dtypes.dtypes import CategoricalDtype as CDT import pandas as pd from pandas import ( Categorical, CategoricalIndex, DatetimeIndex, Index, IntervalIndex, Series, Timestamp, compat) import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray import pandas.core.common as com import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal class TestMatch(object): def test_ints(self): values = np.array([0, 2, 1]) to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0]) result = algos.match(to_match, values) expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = Series(algos.match(to_match, values, np.nan)) expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0])) tm.assert_series_equal(result, expected) s = Series(np.arange(5), dtype=np.float32) result = algos.match(s, [2, 4]) expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = Series(algos.match(s, [2, 4], np.nan)) expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1])) tm.assert_series_equal(result, expected) def test_strings(self): values = ['foo', 'bar', 'baz'] to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux'] result = algos.match(to_match, values) expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = Series(algos.match(to_match, values, np.nan)) expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan])) tm.assert_series_equal(result, expected) class TestFactorize(object): def test_basic(self): labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) tm.assert_numpy_array_equal( uniques, np.array(['a', 'b', 'c'], dtype=object)) labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'], sort=True) exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = np.array(['a', 'b', 'c'], dtype=object) tm.assert_numpy_array_equal(uniques, exp) labels, uniques = algos.factorize(list(reversed(range(5)))) exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = np.array([4, 3, 2, 1, 0], dtype=np.int64) tm.assert_numpy_array_equal(uniques, exp) labels, uniques = algos.factorize(list(reversed(range(5))), sort=True) exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = np.array([0, 1, 2, 3, 4], dtype=np.int64) tm.assert_numpy_array_equal(uniques, exp) labels, uniques = algos.factorize(list(reversed(np.arange(5.)))) exp = np.array([0, 1, 2, 3, 4], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64) tm.assert_numpy_array_equal(uniques, exp) labels, uniques = algos.factorize(list(reversed(np.arange(5.))), sort=True) exp = np.array([4, 3, 2, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64) tm.assert_numpy_array_equal(uniques, exp) def test_mixed(self): # doc example reshaping.rst x = Series(['A', 'A', np.nan, 
'B', 3.14, np.inf]) labels, uniques = algos.factorize(x) exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = Index(['A', 'B', 3.14, np.inf]) tm.assert_index_equal(uniques, exp) labels, uniques = algos.factorize(x, sort=True) exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = Index([3.14, np.inf, 'A', 'B']) tm.assert_index_equal(uniques, exp) def test_datelike(self): # M8 v1 = Timestamp('20130101 09:00:00.00004') v2 = Timestamp('20130101') x = Series([v1, v1, v1, v2, v2, v1]) labels, uniques = algos.factorize(x) exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = DatetimeIndex([v1, v2]) tm.assert_index_equal(uniques, exp) labels, uniques = algos.factorize(x, sort=True) exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) exp = DatetimeIndex([v2, v1]) tm.assert_index_equal(uniques, exp) # period v1 = pd.Period('201302', freq='M') v2 = pd.Period('201303', freq='M') x = Series([v1, v1, v1, v2, v2, v1]) # periods are not 'sorted' as they are converted back into an index labels, uniques = algos.factorize(x) exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2])) labels, uniques = algos.factorize(x, sort=True) exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2])) # GH 5986 v1 = pd.to_timedelta('1 day 1 min') v2 = pd.to_timedelta('1 day') x = Series([v1, v2, v1, v1, v2, v2, v1]) labels, uniques = algos.factorize(x) exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2])) labels, uniques = algos.factorize(x, sort=True) exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(labels, exp) tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1])) def test_factorize_nan(self): # nan should map to na_sentinel, not reverse_indexer[na_sentinel] # rizer.factorize should not raise an exception if na_sentinel indexes # outside of reverse_indexer key = np.array([1, 2, 1, np.nan], dtype='O') rizer = ht.Factorizer(len(key)) for na_sentinel in (-1, 20): ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel) expected = np.array([0, 1, 0, na_sentinel], dtype='int32') assert len(set(key)) == len(set(expected)) tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) # nan still maps to na_sentinel when sort=False key = np.array([0, np.nan, 1], dtype='O') na_sentinel = -1 # TODO(wesm): unused? 
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa expected = np.array([2, -1, 0], dtype='int32') assert len(set(key)) == len(set(expected)) tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) @pytest.mark.parametrize("data,expected_label,expected_level", [ ( [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'], [0, 1, 2, 1, 3], [(1, 1), (1, 2), (0, 0), 'nonsense'] ), ( [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)], [0, 1, 2, 1, 3], [(1, 1), (1, 2), (0, 0), (1, 2, 3)] ), ( [(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)] ) ]) def test_factorize_tuple_list(self, data, expected_label, expected_level): # GH9454 result = pd.factorize(data) tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) expected_level_array = com.asarray_tuplesafe(expected_level, dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_complex_sorting(self): # gh 12666 - check no segfault x17 = np.array([complex(i) for i in range(17)], dtype=object) msg = (r"'(<|>)' not supported between instances of 'complex' and" r" 'complex'|" r"unorderable types: complex\(\) > complex\(\)") with pytest.raises(TypeError, match=msg): algos.factorize(x17[::-1], sort=True) def test_float64_factorize(self, writable): data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64) data.setflags(write=writable) exp_labels = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp) exp_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64) labels, uniques = algos.factorize(data) tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) def test_uint64_factorize(self, writable): data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64) data.setflags(write=writable) exp_labels = np.array([0, 1, 0], dtype=np.intp) exp_uniques = np.array([2**64 - 1, 1], dtype=np.uint64) labels, uniques = algos.factorize(data) tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) def test_int64_factorize(self, writable): data = np.array([2**63 - 1, -2**63, 2**63 - 1], dtype=np.int64) data.setflags(write=writable) exp_labels = np.array([0, 1, 0], dtype=np.intp) exp_uniques = np.array([2**63 - 1, -2**63], dtype=np.int64) labels, uniques = algos.factorize(data) tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) def test_string_factorize(self, writable): data = np.array(['a', 'c', 'a', 'b', 'c'], dtype=object) data.setflags(write=writable) exp_labels = np.array([0, 1, 0, 2, 1], dtype=np.intp) exp_uniques = np.array(['a', 'c', 'b'], dtype=object) labels, uniques = algos.factorize(data) tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) def test_object_factorize(self, writable): data = np.array(['a', 'c', None, np.nan, 'a', 'b', pd.NaT, 'c'], dtype=object) data.setflags(write=writable) exp_labels = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp) exp_uniques = np.array(['a', 'c', 'b'], dtype=object) labels, uniques = algos.factorize(data) tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) def test_deprecate_order(self): # gh 19727 - check warning is raised for deprecated keyword, order. # Test not valid once order keyword is removed. 
data = np.array([2**63, 1, 2**63], dtype=np.uint64) with tm.assert_produces_warning(expected_warning=FutureWarning): algos.factorize(data, order=True) with tm.assert_produces_warning(False): algos.factorize(data) @pytest.mark.parametrize('data', [ np.array([0, 1, 0], dtype='u8'), np.array([-2**63, 1, -2**63], dtype='i8'), np.array(['__nan__', 'foo', '__nan__'], dtype='object'), ]) def test_parametrized_factorize_na_value_default(self, data): # arrays that include the NA default for that type, but isn't used. l, u = algos.factorize(data) expected_uniques = data[[0, 1]] expected_labels = np.array([0, 1, 0], dtype=np.intp) tm.assert_numpy_array_equal(l, expected_labels) tm.assert_numpy_array_equal(u, expected_uniques) @pytest.mark.parametrize('data, na_value', [ (np.array([0, 1, 0, 2], dtype='u8'), 0), (np.array([1, 0, 1, 2], dtype='u8'), 1), (np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63), (np.array([1, -2**63, 1, 0], dtype='i8'), 1), (np.array(['a', '', 'a', 'b'], dtype=object), 'a'), (np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()), (np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object), ('a', 1)), ]) def test_parametrized_factorize_na_value(self, data, na_value): l, u = algos._factorize_array(data, na_value=na_value) expected_uniques = data[[1, 3]] expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(l, expected_labels) tm.assert_numpy_array_equal(u, expected_uniques) class TestUnique(object): def test_ints(self): arr = np.random.randint(0, 100, size=50) result = algos.unique(arr) assert isinstance(result, np.ndarray) def test_objects(self): arr = np.random.randint(0, 100, size=50).astype('O') result = algos.unique(arr) assert isinstance(result, np.ndarray) def test_object_refcount_bug(self): lst = ['A', 'B', 'C', 'D', 'E'] for i in range(1000): len(algos.unique(lst)) def test_on_index_object(self): mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile( np.arange(5), 5)]) expected = mindex.values expected.sort() mindex = mindex.repeat(2) result = pd.unique(mindex) result.sort() tm.assert_almost_equal(result, expected) def test_datetime64_dtype_array_returned(self): # GH 9431 expected = np_array_datetime64_compat( ['2015-01-03T00:00:00.000000000+0000', '2015-01-01T00:00:00.000000000+0000'], dtype='M8[ns]') dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000', '2015-01-01T00:00:00.000000000', '2015-01-01T00:00:00.000000000']) result = algos.unique(dt_index) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype s = Series(dt_index) result = algos.unique(s) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype arr = s.values result = algos.unique(arr) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype def test_timedelta64_dtype_array_returned(self): # GH 9431 expected = np.array([31200, 45678, 10000], dtype='m8[ns]') td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678]) result = algos.unique(td_index) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype s = Series(td_index) result = algos.unique(s) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype arr = s.values result = algos.unique(arr) tm.assert_numpy_array_equal(result, expected) assert result.dtype == expected.dtype def test_uint64_overflow(self): s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) exp = np.array([1, 2, 2**63], dtype=np.uint64) tm.assert_numpy_array_equal(algos.unique(s), exp) def 
test_nan_in_object_array(self): duplicated_items = ['a', np.nan, 'c', 'c'] result = pd.unique(duplicated_items) expected = np.array(['a', np.nan, 'c'], dtype=object) tm.assert_numpy_array_equal(result, expected) def test_categorical(self): # we are expecting to return in the order # of appearance expected = Categorical(list('bac'), categories=list('bac')) # we are expecting to return in the order # of the categories expected_o = Categorical( list('bac'), categories=list('abc'), ordered=True) # GH 15939 c = Categorical(list('baabc')) result = c.unique() tm.assert_categorical_equal(result, expected) result = algos.unique(c) tm.assert_categorical_equal(result, expected) c = Categorical(list('baabc'), ordered=True) result = c.unique() tm.assert_categorical_equal(result, expected_o) result = algos.unique(c) tm.assert_categorical_equal(result, expected_o) # Series of categorical dtype s = Series(Categorical(list('baabc')), name='foo') result = s.unique() tm.assert_categorical_equal(result, expected) result = pd.unique(s) tm.assert_categorical_equal(result, expected) # CI -> return CI ci = CategoricalIndex(Categorical(list('baabc'), categories=list('bac'))) expected = CategoricalIndex(expected) result = ci.unique() tm.assert_index_equal(result, expected) result = pd.unique(ci) tm.assert_index_equal(result, expected) def test_datetime64tz_aware(self): # GH 15939 result = Series( Index([Timestamp('20160101', tz='US/Eastern'), Timestamp('20160101', tz='US/Eastern')])).unique() expected = DatetimeArray._from_sequence(np.array([ Timestamp('2016-01-01 00:00:00-0500', tz="US/Eastern") ])) tm.assert_extension_array_equal(result, expected) result = Index([Timestamp('20160101', tz='US/Eastern'), Timestamp('20160101', tz='US/Eastern')]).unique() expected = DatetimeIndex(['2016-01-01 00:00:00'], dtype='datetime64[ns, US/Eastern]', freq=None) tm.assert_index_equal(result, expected) result = pd.unique( Series(Index([Timestamp('20160101', tz='US/Eastern'), Timestamp('20160101', tz='US/Eastern')]))) expected = DatetimeArray._from_sequence(np.array([ Timestamp('2016-01-01', tz="US/Eastern"), ])) tm.assert_extension_array_equal(result, expected) result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'), Timestamp('20160101', tz='US/Eastern')])) expected = DatetimeIndex(['2016-01-01 00:00:00'], dtype='datetime64[ns, US/Eastern]', freq=None) tm.assert_index_equal(result, expected) def test_order_of_appearance(self): # 9346 # light testing of guarantee of order of appearance # these also are the doc-examples result = pd.unique(Series([2, 1, 3, 3])) tm.assert_numpy_array_equal(result, np.array([2, 1, 3], dtype='int64')) result = pd.unique(Series([2] + [1] * 5)) tm.assert_numpy_array_equal(result, np.array([2, 1], dtype='int64')) result = pd.unique(Series([Timestamp('20160101'), Timestamp('20160101')])) expected = np.array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') tm.assert_numpy_array_equal(result, expected) result = pd.unique(Index( [Timestamp('20160101', tz='US/Eastern'), Timestamp('20160101', tz='US/Eastern')])) expected = DatetimeIndex(['2016-01-01 00:00:00'], dtype='datetime64[ns, US/Eastern]', freq=None) tm.assert_index_equal(result, expected) result = pd.unique(list('aabc')) expected = np.array(['a', 'b', 'c'], dtype=object) tm.assert_numpy_array_equal(result, expected) result = pd.unique(Series(Categorical(list('aabc')))) expected = Categorical(list('abc')) tm.assert_categorical_equal(result, expected) @pytest.mark.parametrize("arg ,expected", [ (('1', '1', '2'), np.array(['1', '2'], 
dtype=object)), (('foo',), np.array(['foo'], dtype=object)) ]) def test_tuple_with_strings(self, arg, expected): # see GH 17108 result = pd.unique(arg) tm.assert_numpy_array_equal(result, expected) def test_obj_none_preservation(self): # GH 20866 arr = np.array(['foo', None], dtype=object) result = pd.unique(arr) expected = np.array(['foo', None], dtype=object) tm.assert_numpy_array_equal(result, expected, strict_nan=True) def test_signed_zero(self): # GH 21866 a = np.array([-0.0, 0.0]) result = pd.unique(a) expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent tm.assert_numpy_array_equal(result, expected) def test_different_nans(self): # GH 21866 # create different nans from bit-patterns: NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent result = pd.unique(a) expected = np.array([np.nan]) tm.assert_numpy_array_equal(result, expected) def test_first_nan_kept(self): # GH 22295 # create different nans from bit-patterns: bits_for_nan1 = 0xfff8000000000001 bits_for_nan2 = 0x7ff8000000000001 NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0] NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 for el_type in [np.float64, np.object]: a = np.array([NAN1, NAN2], dtype=el_type) result = pd.unique(a) assert result.size == 1 # use bit patterns to identify which nan was kept: result_nan_bits = struct.unpack("=Q", struct.pack("d", result[0]))[0] assert result_nan_bits == bits_for_nan1 def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixture2): # GH 22295 if unique_nulls_fixture is unique_nulls_fixture2: return # skip it, values not unique a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=np.object) result = pd.unique(a) assert result.size == 2 assert a[0] is unique_nulls_fixture assert a[1] is unique_nulls_fixture2 class TestIsin(object): def test_invalid(self): msg = (r"only list-like objects are allowed to be passed to isin\(\)," r" you passed a \[int\]") with pytest.raises(TypeError, match=msg): algos.isin(1, 1) with pytest.raises(TypeError, match=msg): algos.isin(1, [1]) with pytest.raises(TypeError, match=msg): algos.isin([1], 1) def test_basic(self): result = algos.isin([1, 2], [1]) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(np.array([1, 2]), [1]) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(Series([1, 2]), [1]) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(Series([1, 2]), Series([1])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(Series([1, 2]), {1}) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(['a', 'b'], ['a']) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(Series(['a', 'b']), Series(['a'])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(Series(['a', 'b']), {'a'}) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(['a', 'b'], [1]) expected = np.array([False, False]) tm.assert_numpy_array_equal(result, expected) def test_i8(self): arr = pd.date_range('20130101', periods=3).values 
result = algos.isin(arr, [arr[0]]) expected = np.array([True, False, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(arr, arr[0:2]) expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(arr, set(arr[0:2])) expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) arr = pd.timedelta_range('1 day', periods=3).values result = algos.isin(arr, [arr[0]]) expected = np.array([True, False, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(arr, arr[0:2]) expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) result = algos.isin(arr, set(arr[0:2])) expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) def test_large(self): s = pd.date_range('20000101', periods=2000000, freq='s').values result = algos.isin(s, s[0:2]) expected = np.zeros(len(s), dtype=bool) expected[0] = True expected[1] = True tm.assert_numpy_array_equal(result, expected) def test_categorical_from_codes(self): # GH 16639 vals = np.array([0, 1, 2, 0]) cats = ['a', 'b', 'c'] Sd = Series(Categorical(1).from_codes(vals, cats)) St = Series(Categorical(1).from_codes(np.array([0, 1]), cats)) expected = np.array([True, True, False, True]) result = algos.isin(Sd, St) tm.assert_numpy_array_equal(expected, result) def test_same_nan_is_in(self): # GH 22160 # nan is special, because from " a is b" doesn't follow "a == b" # at least, isin() should follow python's "np.nan in [nan] == True" # casting to -> np.float64 -> another float-object somewher on # the way could lead jepardize this behavior comps = [np.nan] # could be casted to float64 values = [np.nan] expected = np.array([True]) result = algos.isin(comps, values) tm.assert_numpy_array_equal(expected, result) def test_same_object_is_in(self): # GH 22160 # there could be special treatment for nans # the user however could define a custom class # with similar behavior, then we at least should # fall back to usual python's behavior: "a in [a] == True" class LikeNan(object): def __eq__(self): return False def __hash__(self): return 0 a, b = LikeNan(), LikeNan() # same object -> True tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True])) # different objects -> False tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False])) def test_different_nans(self): # GH 22160 # all nans are handled as equivalent comps = [float('nan')] values = [float('nan')] assert comps[0] is not values[0] # different nan-objects # as list of python-objects: result = algos.isin(comps, values) tm.assert_numpy_array_equal(np.array([True]), result) # as object-array: result = algos.isin(np.asarray(comps, dtype=np.object), np.asarray(values, dtype=np.object)) tm.assert_numpy_array_equal(np.array([True]), result) # as float64-array: result = algos.isin(np.asarray(comps, dtype=np.float64), np.asarray(values, dtype=np.float64)) tm.assert_numpy_array_equal(np.array([True]), result) def test_no_cast(self): # GH 22160 # ensure 42 is not casted to a string comps = ['ss', 42] values = ['42'] expected = np.array([False, False]) result = algos.isin(comps, values) tm.assert_numpy_array_equal(expected, result) @pytest.mark.parametrize("empty", [[], Series(), np.array([])]) def test_empty(self, empty): # see gh-16991 vals = Index(["a", "b"]) expected = np.array([False, False]) result = algos.isin(vals, empty) tm.assert_numpy_array_equal(expected, result) def test_different_nan_objects(self): # GH 22119 comps = 
np.array(['nan', np.nan * 1j, float('nan')], dtype=np.object) vals = np.array([float('nan')], dtype=np.object) expected = np.array([False, False, True]) result = algos.isin(comps, vals) tm.assert_numpy_array_equal(expected, result) def test_different_nans_as_float64(self): # GH 21866 # create different nans from bit-patterns, # these nans will land in different buckets in the hash-table # if no special care is taken NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 # check that NAN1 and NAN2 are equivalent: arr = np.array([NAN1, NAN2], dtype=np.float64) lookup1 = np.array([NAN1], dtype=np.float64) result = algos.isin(arr, lookup1) expected = np.array([True, True]) tm.assert_numpy_array_equal(result, expected) lookup2 = np.array([NAN2], dtype=np.float64) result = algos.isin(arr, lookup2) expected = np.array([True, True]) tm.assert_numpy_array_equal(result, expected) class TestValueCounts(object): def test_value_counts(self): np.random.seed(1234) from pandas.core.reshape.tile import cut arr = np.random.randn(4) factor = cut(arr, 4) # assert isinstance(factor, n) result = algos.value_counts(factor) breaks = [-1.194, -0.535, 0.121, 0.777, 1.433] index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True)) expected = Series([1, 1, 1, 1], index=index) tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self): s = [1, 2, 3, 4] result = algos.value_counts(s, bins=1) expected = Series([4], index=IntervalIndex.from_tuples([(0.996, 4.0)])) tm.assert_series_equal(result, expected) result = algos.value_counts(s, bins=2, sort=False) expected = Series([2, 2], index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)])) tm.assert_series_equal(result, expected) def test_value_counts_dtypes(self): result = algos.value_counts([1, 1.]) assert len(result) == 1 result = algos.value_counts([1, 1.], bins=1) assert len(result) == 1 result = algos.value_counts(Series([1, 1., '1'])) # object assert len(result) == 2 msg = "bins argument only works with numeric data" with pytest.raises(TypeError, match=msg): algos.value_counts(['1', 1], bins=1) def test_value_counts_nat(self): td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]') dt = pd.to_datetime(['NaT', '2014-01-01']) for s in [td, dt]: vc = algos.value_counts(s) vc_with_na = algos.value_counts(s, dropna=False) assert len(vc) == 1 assert len(vc_with_na) == 2 exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1}) tm.assert_series_equal(algos.value_counts(dt), exp_dt) # TODO same for (timedelta) def test_value_counts_datetime_outofbounds(self): # GH 13663 s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1), datetime(3000, 1, 1), datetime(3000, 1, 1)]) res = s.value_counts() exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)], dtype=object) exp = Series([3, 2, 1], index=exp_index) tm.assert_series_equal(res, exp) # GH 12424 res = pd.to_datetime(Series(['2362-01-01', np.nan]), errors='ignore') exp = Series(['2362-01-01', np.nan], dtype=object) tm.assert_series_equal(res, exp) def test_categorical(self): s = Series(Categorical(list('aaabbc'))) result = s.value_counts() expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c'])) tm.assert_series_equal(result, expected, check_index_type=True) # preserve order? 
s = s.cat.as_ordered() result = s.value_counts() expected.index = expected.index.as_ordered() tm.assert_series_equal(result, expected, check_index_type=True) def test_categorical_nans(self): s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan) s.iloc[1] = np.nan result = s.value_counts() expected = Series([4, 3, 2], index=CategoricalIndex( ['a', 'b', 'c'], categories=['a', 'b', 'c'])) tm.assert_series_equal(result, expected, check_index_type=True) result = s.value_counts(dropna=False) expected = Series([ 4, 3, 2, 1 ], index=CategoricalIndex(['a', 'b', 'c', np.nan])) tm.assert_series_equal(result, expected, check_index_type=True) # out of order s = Series(Categorical( list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c'])) s.iloc[1] = np.nan result = s.value_counts() expected = Series([4, 3, 2], index=CategoricalIndex( ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True)) tm.assert_series_equal(result, expected, check_index_type=True) result = s.value_counts(dropna=False) expected = Series([4, 3, 2, 1], index=CategoricalIndex( ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True)) tm.assert_series_equal(result, expected, check_index_type=True) def test_categorical_zeroes(self): # keep the `d` category with 0 s = Series(Categorical( list('bbbaac'), categories=list('abcd'), ordered=True)) result = s.value_counts() expected = Series([3, 2, 1, 0], index=Categorical( ['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True)) tm.assert_series_equal(result, expected, check_index_type=True) def test_dropna(self): # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 tm.assert_series_equal( Series([True, True, False]).value_counts(dropna=True), Series([2, 1], index=[True, False])) tm.assert_series_equal( Series([True, True, False]).value_counts(dropna=False), Series([2, 1], index=[True, False])) tm.assert_series_equal( Series([True, True, False, None]).value_counts(dropna=True), Series([2, 1], index=[True, False])) tm.assert_series_equal( Series([True, True, False, None]).value_counts(dropna=False), Series([2, 1, 1], index=[True, False, np.nan])) tm.assert_series_equal( Series([10.3, 5., 5.]).value_counts(dropna=True), Series([2, 1], index=[5., 10.3])) tm.assert_series_equal( Series([10.3, 5., 5.]).value_counts(dropna=False), Series([2, 1], index=[5., 10.3])) tm.assert_series_equal( Series([10.3, 5., 5., None]).value_counts(dropna=True), Series([2, 1], index=[5., 10.3])) # 32-bit linux has a different ordering if not compat.is_platform_32bit(): result = Series([10.3, 5., 5., None]).value_counts(dropna=False) expected = Series([2, 1, 1], index=[5., 10.3, np.nan]) tm.assert_series_equal(result, expected) def test_value_counts_normalized(self): # GH12558 s = Series([1, 2, np.nan, np.nan, np.nan]) dtypes = (np.float64, np.object, 'M8[ns]') for t in dtypes: s_typed = s.astype(t) result = s_typed.value_counts(normalize=True, dropna=False) expected = Series([0.6, 0.2, 0.2], index=Series([np.nan, 2.0, 1.0], dtype=t)) tm.assert_series_equal(result, expected) result = s_typed.value_counts(normalize=True, dropna=True) expected = Series([0.5, 0.5], index=Series([2.0, 1.0], dtype=t)) tm.assert_series_equal(result, expected) def test_value_counts_uint64(self): arr = np.array([2**63], dtype=np.uint64) expected = Series([1], index=[2**63]) result = algos.value_counts(arr) tm.assert_series_equal(result, expected) arr = np.array([-1, 2**63], dtype=object) expected = Series([1, 1], index=[-1, 2**63]) result = algos.value_counts(arr) # 32-bit linux has a different 
ordering if not compat.is_platform_32bit(): tm.assert_series_equal(result, expected) class TestDuplicated(object): def test_duplicated_with_nas(self): keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object) result = algos.duplicated(keys) expected = np.array([False, False, False, True, False, True]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep='first') expected = np.array([False, False, False, True, False, True]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep='last') expected = np.array([True, False, True, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep=False) expected = np.array([True, False, True, True, False, True]) tm.assert_numpy_array_equal(result, expected) keys = np.empty(8, dtype=object) for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2)): keys[i] = t result = algos.duplicated(keys) falses = [False] * 4 trues = [True] * 4 expected = np.array(falses + trues) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep='last') expected = np.array(trues + falses) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep=False) expected = np.array(trues + trues) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize('case', [ np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]), np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j, 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]), np.array(['a', 'b', 'a', 'e', 'c', 'b', 'd', 'a', 'e', 'f'], dtype=object), np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64), ]) def test_numeric_object_likes(self, case): exp_first = np.array([False, False, True, False, False, True, False, True, True, False]) exp_last = np.array([True, True, True, True, False, False, False, False, False, False]) exp_false = exp_first | exp_last res_first = algos.duplicated(case, keep='first') tm.assert_numpy_array_equal(res_first, exp_first) res_last = algos.duplicated(case, keep='last') tm.assert_numpy_array_equal(res_last, exp_last) res_false = algos.duplicated(case, keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # index for idx in [Index(case), Index(case, dtype='category')]: res_first = idx.duplicated(keep='first') tm.assert_numpy_array_equal(res_first, exp_first) res_last = idx.duplicated(keep='last') tm.assert_numpy_array_equal(res_last, exp_last) res_false = idx.duplicated(keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # series for s in [Series(case), Series(case, dtype='category')]: res_first = s.duplicated(keep='first') tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep='last') tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) tm.assert_series_equal(res_false, Series(exp_false)) def test_datetime_likes(self): dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03', '2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06'] td = ['1 days', '2 days', '1 days', 'NaT', '3 days', '2 days', '4 days', '1 days', 'NaT', '6 days'] cases = [np.array([Timestamp(d) for d in dt]), np.array([Timestamp(d, tz='US/Eastern') for d in dt]), np.array([pd.Period(d, freq='D') for d in dt]), np.array([np.datetime64(d) for d in dt]), np.array([pd.Timedelta(d) for d in td])] exp_first = np.array([False, False, True, False, False, True, False, True, True, False]) exp_last = 
np.array([True, True, True, True, False, False, False, False, False, False]) exp_false = exp_first | exp_last for case in cases: res_first = algos.duplicated(case, keep='first') tm.assert_numpy_array_equal(res_first, exp_first) res_last = algos.duplicated(case, keep='last') tm.assert_numpy_array_equal(res_last, exp_last) res_false = algos.duplicated(case, keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # index for idx in [Index(case), Index(case, dtype='category'), Index(case, dtype=object)]: res_first = idx.duplicated(keep='first') tm.assert_numpy_array_equal(res_first, exp_first) res_last = idx.duplicated(keep='last') tm.assert_numpy_array_equal(res_last, exp_last) res_false = idx.duplicated(keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # series for s in [Series(case), Series(case, dtype='category'), Series(case, dtype=object)]: res_first = s.duplicated(keep='first') tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep='last') tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) tm.assert_series_equal(res_false, Series(exp_false)) def test_unique_index(self): cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)] for case in cases: assert case.is_unique is True tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False])) @pytest.mark.parametrize('arr, unique', [ ([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)], [(0, 0), (0, 1), (1, 0), (1, 1)]), ([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')], [('b', 'c'), ('a', 'b')]), ([('a', 1), ('b', 2), ('a', 3), ('a', 1)], [('a', 1), ('b', 2), ('a', 3)]), ]) def test_unique_tuples(self, arr, unique): # https://github.com/pandas-dev/pandas/issues/16519 expected = np.empty(len(unique), dtype=object) expected[:] = unique result = pd.unique(arr) tm.assert_numpy_array_equal(result, expected) class GroupVarTestMixin(object): def test_group_var_generic_1d(self): prng = RandomState(1234) out = (np.nan * np.ones((5, 1))).astype(self.dtype) counts = np.zeros(5, dtype='int64') values = 10 * prng.rand(15, 1).astype(self.dtype) labels = np.tile(np.arange(5), (3, )).astype('int64') expected_out = (np.squeeze(values) .reshape((5, 3), order='F') .std(axis=1, ddof=1) ** 2)[:, np.newaxis] expected_counts = counts + 3 self.algo(out, counts, values, labels) assert np.allclose(out, expected_out, self.rtol) tm.assert_numpy_array_equal(counts, expected_counts) def test_group_var_generic_1d_flat_labels(self): prng = RandomState(1234) out = (np.nan * np.ones((1, 1))).astype(self.dtype) counts = np.zeros(1, dtype='int64') values = 10 * prng.rand(5, 1).astype(self.dtype) labels = np.zeros(5, dtype='int64') expected_out = np.array([[values.std(ddof=1) ** 2]]) expected_counts = counts + 5 self.algo(out, counts, values, labels) assert np.allclose(out, expected_out, self.rtol) tm.assert_numpy_array_equal(counts, expected_counts) def test_group_var_generic_2d_all_finite(self): prng = RandomState(1234) out = (np.nan * np.ones((5, 2))).astype(self.dtype) counts = np.zeros(5, dtype='int64') values = 10 * prng.rand(10, 2).astype(self.dtype) labels = np.tile(np.arange(5), (2, )).astype('int64') expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2 expected_counts = counts + 2 self.algo(out, counts, values, labels) assert np.allclose(out, expected_out, self.rtol) tm.assert_numpy_array_equal(counts, expected_counts) def test_group_var_generic_2d_some_nan(self): prng = RandomState(1234) out = (np.nan * np.ones((5, 2))).astype(self.dtype) counts = 
np.zeros(5, dtype='int64') values = 10 * prng.rand(10, 2).astype(self.dtype) values[:, 1] = np.nan labels = np.tile(np.arange(5), (2, )).astype('int64') expected_out = np.vstack([values[:, 0] .reshape(5, 2, order='F') .std(ddof=1, axis=1) ** 2, np.nan * np.ones(5)]).T.astype(self.dtype) expected_counts = counts + 2 self.algo(out, counts, values, labels) tm.assert_almost_equal(out, expected_out, check_less_precise=6) tm.assert_numpy_array_equal(counts, expected_counts) def test_group_var_constant(self): # Regression test from GH 10448. out = np.array([[np.nan]], dtype=self.dtype) counts = np.array([0], dtype='int64') values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype) labels = np.zeros(3, dtype='int64') self.algo(out, counts, values, labels) assert counts[0] == 3 assert out[0, 0] >= 0 tm.assert_almost_equal(out[0, 0], 0.0) class TestGroupVarFloat64(GroupVarTestMixin): __test__ = True algo = staticmethod(libgroupby.group_var_float64) dtype = np.float64 rtol = 1e-5 def test_group_var_large_inputs(self): prng = RandomState(1234) out = np.array([[np.nan]], dtype=self.dtype) counts = np.array([0], dtype='int64') values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype) values.shape = (10 ** 6, 1) labels = np.zeros(10 ** 6, dtype='int64') self.algo(out, counts, values, labels) assert counts[0] == 10 ** 6 tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True) class TestGroupVarFloat32(GroupVarTestMixin): __test__ = True algo = staticmethod(libgroupby.group_var_float32) dtype = np.float32 rtol = 1e-2 class TestHashTable(object): def test_lookup_nan(self, writable): xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3]) # GH 21688 ensure we can deal with readonly memory views xs.setflags(write=writable) m = ht.Float64HashTable() m.map_locations(xs) tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64)) def test_add_signed_zeros(self): # GH 21866 inconsistent hash-function for float64 # default hash-function would lead to different hash-buckets # for 0.0 and -0.0 if there are more than 2^30 hash-buckets # but this would mean 16GB N = 4 # 12 * 10**8 would trigger the error, if you have enough memory m = ht.Float64HashTable(N) m.set_item(0.0, 0) m.set_item(-0.0, 0) assert len(m) == 1 # 0.0 and -0.0 are equivalent def test_add_different_nans(self): # GH 21866 inconsistent hash-function for float64 # create different nans from bit-patterns: NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 # default hash function would lead to different hash-buckets # for NAN1 and NAN2 even if there are only 4 buckets: m = ht.Float64HashTable() m.set_item(NAN1, 0) m.set_item(NAN2, 0) assert len(m) == 1 # NAN1 and NAN2 are equivalent def test_lookup_overflow(self, writable): xs = np.array([1, 2, 2**63], dtype=np.uint64) # GH 21688 ensure we can deal with readonly memory views xs.setflags(write=writable) m = ht.UInt64HashTable() m.map_locations(xs) tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64)) def test_get_unique(self): s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) exp = np.array([1, 2, 2**63], dtype=np.uint64) tm.assert_numpy_array_equal(s.unique(), exp) @pytest.mark.parametrize('nvals', [0, 10]) # resizing to 0 is special case @pytest.mark.parametrize('htable, uniques, dtype, safely_resizes', [ (ht.PyObjectHashTable, ht.ObjectVector, 'object', False), (ht.StringHashTable, ht.ObjectVector, 'object', True), 
(ht.Float64HashTable, ht.Float64Vector, 'float64', False), (ht.Int64HashTable, ht.Int64Vector, 'int64', False), (ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]) def test_vector_resize(self, writable, htable, uniques, dtype, safely_resizes, nvals): # Test for memory errors after internal vector # reallocations (GH 7157) vals = np.array(np.random.randn(1000), dtype=dtype) # GH 21688 ensures we can deal with read-only memory views vals.setflags(write=writable) # initialise instances; cannot initialise in parametrization, # as otherwise external views would be held on the array (which is # one of the things this test is checking) htable = htable() uniques = uniques() # get_labels may append to uniques htable.get_labels(vals[:nvals], uniques, 0, -1) # to_array() sets an external_view_exists flag on uniques. tmp = uniques.to_array() oldshape = tmp.shape # subsequent get_labels() calls can no longer append to it # (except for StringHashTables + ObjectVector) if safely_resizes: htable.get_labels(vals, uniques, 0, -1) else: with pytest.raises(ValueError, match='external reference.*'): htable.get_labels(vals, uniques, 0, -1) uniques.to_array() # should not raise here assert tmp.shape == oldshape @pytest.mark.parametrize('htable, tm_dtype', [ (ht.PyObjectHashTable, 'String'), (ht.StringHashTable, 'String'), (ht.Float64HashTable, 'Float'), (ht.Int64HashTable, 'Int'), (ht.UInt64HashTable, 'UInt')]) def test_hashtable_unique(self, htable, tm_dtype, writable): # output of maker has guaranteed unique elements maker = getattr(tm, 'make' + tm_dtype + 'Index') s = Series(maker(1000)) if htable == ht.Float64HashTable: # add NaN for float column s.loc[500] = np.nan elif htable == ht.PyObjectHashTable: # use different NaN types for object column s.loc[500:502] = [np.nan, None, pd.NaT] # create duplicated selection s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) s_duplicated.values.setflags(write=writable) # drop_duplicates has own cython code (hash_table_func_helper.pxi) # and is tested separately; keeps first occurrence like ht.unique() expected_unique = s_duplicated.drop_duplicates(keep='first').values result_unique = htable().unique(s_duplicated.values) tm.assert_numpy_array_equal(result_unique, expected_unique) # test return_inverse=True # reconstruction can only succeed if the inverse is correct result_unique, result_inverse = htable().unique(s_duplicated.values, return_inverse=True) tm.assert_numpy_array_equal(result_unique, expected_unique) reconstr = result_unique[result_inverse] tm.assert_numpy_array_equal(reconstr, s_duplicated.values) @pytest.mark.parametrize('htable, tm_dtype', [ (ht.PyObjectHashTable, 'String'), (ht.StringHashTable, 'String'), (ht.Float64HashTable, 'Float'), (ht.Int64HashTable, 'Int'), (ht.UInt64HashTable, 'UInt')]) def test_hashtable_factorize(self, htable, tm_dtype, writable): # output of maker has guaranteed unique elements maker = getattr(tm, 'make' + tm_dtype + 'Index') s = Series(maker(1000)) if htable == ht.Float64HashTable: # add NaN for float column s.loc[500] = np.nan elif htable == ht.PyObjectHashTable: # use different NaN types for object column s.loc[500:502] = [np.nan, None, pd.NaT] # create duplicated selection s_duplicated = s.sample(frac=3, replace=True).reset_index(drop=True) s_duplicated.values.setflags(write=writable) na_mask = s_duplicated.isna().values result_unique, result_inverse = htable().factorize(s_duplicated.values) # drop_duplicates has own cython code (hash_table_func_helper.pxi) # and is tested separately; keeps first 
occurrence like ht.factorize() # since factorize removes all NaNs, we do the same here expected_unique = s_duplicated.dropna().drop_duplicates().values tm.assert_numpy_array_equal(result_unique, expected_unique) # reconstruction can only succeed if the inverse is correct. Since # factorize removes the NaNs, those have to be excluded here as well result_reconstruct = result_unique[result_inverse[~na_mask]] expected_reconstruct = s_duplicated.dropna().values tm.assert_numpy_array_equal(result_reconstruct, expected_reconstruct) @pytest.mark.parametrize('hashtable', [ ht.PyObjectHashTable, ht.StringHashTable, ht.Float64HashTable, ht.Int64HashTable, ht.UInt64HashTable]) def test_hashtable_large_sizehint(self, hashtable): # GH 22729 size_hint = np.iinfo(np.uint32).max + 1 tbl = hashtable(size_hint=size_hint) # noqa def test_quantile(): s = Series(np.random.randn(100)) result = algos.quantile(s, [0, .25, .5, .75, 1.]) expected = algos.quantile(s.values, [0, .25, .5, .75, 1.]) tm.assert_almost_equal(result, expected) def test_unique_label_indices(): a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8') left = ht.unique_label_indices(a) right = np.unique(a, return_index=True)[1] tm.assert_numpy_array_equal(left, right, check_dtype=False) a[np.random.choice(len(a), 10)] = -1 left = ht.unique_label_indices(a) right = np.unique(a, return_index=True)[1][1:] tm.assert_numpy_array_equal(left, right, check_dtype=False) class TestRank(object): @td.skip_if_no_scipy def test_scipy_compat(self): from scipy.stats import rankdata def _check(arr): mask = ~np.isfinite(arr) arr = arr.copy() result = libalgos.rank_1d_float64(arr) arr[mask] = np.inf exp = rankdata(arr) exp[mask] = nan assert_almost_equal(result, exp) _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan])) _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan])) def test_basic(self): exp = np.array([1, 2], dtype=np.float64) for dtype in np.typecodes['AllInteger']: s = Series([1, 100], dtype=dtype) tm.assert_numpy_array_equal(algos.rank(s), exp) def test_uint64_overflow(self): exp = np.array([1, 2], dtype=np.float64) for dtype in [np.float64, np.uint64]: s = Series([1, 2**63], dtype=dtype) tm.assert_numpy_array_equal(algos.rank(s), exp) def test_too_many_ndims(self): arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) msg = "Array with ndim > 2 are not supported" with pytest.raises(TypeError, match=msg): algos.rank(arr) @pytest.mark.single @pytest.mark.high_memory @pytest.mark.parametrize('values', [ np.arange(2**24 + 1), np.arange(2**25 + 2).reshape(2**24 + 1, 2)], ids=['1d', '2d']) def test_pct_max_many_rows(self, values): # GH 18271 result = algos.rank(values, pct=True).max() assert result == 1 def test_pad_backfill_object_segfault(): old = np.array([], dtype='O') new = np.array([datetime(2010, 12, 31)], dtype='O') result = libalgos.pad["object"](old, new) expected = np.array([-1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = libalgos.pad["object"](new, old) expected = np.array([], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill["object"](old, new) expected = np.array([-1], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill["object"](new, old) expected = np.array([], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) def test_arrmap(): values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O') result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar']) assert (result.dtype == np.bool_) class 
TestTseriesUtil(object): def test_combineFunc(self): pass def test_reindex(self): pass def test_isna(self): pass def test_groupby(self): pass def test_groupby_withnull(self): pass def test_backfill(self): old = Index([1, 5, 10]) new = Index(lrange(12)) filler = libalgos.backfill["int64_t"](old.values, new.values) expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler) # corner case old = Index([1, 4]) new = Index(lrange(5, 10)) filler = libalgos.backfill["int64_t"](old.values, new.values) expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler) def test_pad(self): old = Index([1, 5, 10]) new = Index(lrange(12)) filler = libalgos.pad["int64_t"](old.values, new.values) expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler) # corner case old = Index([5, 10]) new = Index(lrange(5)) filler = libalgos.pad["int64_t"](old.values, new.values) expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) tm.assert_numpy_array_equal(filler, expect_filler) def test_is_lexsorted(): failure = [ np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'), np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], dtype='int64')] assert (not libalgos.is_lexsorted(failure)) def test_groupsort_indexer(): a = np.random.randint(0, 1000, 100).astype(np.int64) b = np.random.randint(0, 1000, 100).astype(np.int64) result = libalgos.groupsort_indexer(a, 1000)[0] # need to use a stable sort # np.argsort returns int, groupsort_indexer # always returns int64 expected = np.argsort(a, kind='mergesort') expected = expected.astype(np.int64) tm.assert_numpy_array_equal(result, expected) # compare with lexsort # np.lexsort returns int, groupsort_indexer # always returns int64 key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) expected = expected.astype(np.int64) tm.assert_numpy_array_equal(result, expected) def test_infinity_sort(): # GH 13445 # numpy's argsort can be unhappy if something is less than # itself. Instead, let's give our infinities a self-consistent # ordering, but outside the float extended real line. 
Inf = libalgos.Infinity() NegInf = libalgos.NegInfinity() ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf] assert all(Inf >= x for x in ref_nums) assert all(Inf > x or x is Inf for x in ref_nums) assert Inf >= Inf and Inf == Inf assert not Inf < Inf and not Inf > Inf assert libalgos.Infinity() == libalgos.Infinity() assert not libalgos.Infinity() != libalgos.Infinity() assert all(NegInf <= x for x in ref_nums) assert all(NegInf < x or x is NegInf for x in ref_nums) assert NegInf <= NegInf and NegInf == NegInf assert not NegInf < NegInf and not NegInf > NegInf assert libalgos.NegInfinity() == libalgos.NegInfinity() assert not libalgos.NegInfinity() != libalgos.NegInfinity() for perm in permutations(ref_nums): assert sorted(perm) == ref_nums # smoke tests np.array([libalgos.Infinity()] * 32).argsort() np.array([libalgos.NegInfinity()] * 32).argsort() def test_infinity_against_nan(): Inf = libalgos.Infinity() NegInf = libalgos.NegInfinity() assert not Inf > np.nan assert not Inf >= np.nan assert not Inf < np.nan assert not Inf <= np.nan assert not Inf == np.nan assert Inf != np.nan assert not NegInf > np.nan assert not NegInf >= np.nan assert not NegInf < np.nan assert not NegInf <= np.nan assert not NegInf == np.nan assert NegInf != np.nan def test_ensure_platform_int(): arr = np.arange(100, dtype=np.intp) result = libalgos.ensure_platform_int(arr) assert (result is arr) def test_int64_add_overflow(): # see gh-14068 msg = "Overflow in int64 addition" m = np.iinfo(np.int64).max n = np.iinfo(np.int64).min with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, m]), m) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, m]), np.array([m, m])) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([n, n]), n) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([n, n]), np.array([n, n])) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, n]), np.array([n, n])) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), arr_mask=np.array([False, True])) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), b_mask=np.array([False, True])) with pytest.raises(OverflowError, match=msg): algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), arr_mask=np.array([False, True]), b_mask=np.array([False, True])) with pytest.raises(OverflowError, match=msg): with tm.assert_produces_warning(RuntimeWarning): algos.checked_add_with_arr(np.array([m, m]), np.array([np.nan, m])) # Check that the nan boolean arrays override whether or not # the addition overflows. We don't check the result but just # the fact that an OverflowError is not raised. 
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), arr_mask=np.array([True, True])) algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), b_mask=np.array([True, True])) algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]), arr_mask=np.array([True, False]), b_mask=np.array([False, True])) class TestMode(object): def test_no_mode(self): exp = Series([], dtype=np.float64) tm.assert_series_equal(algos.mode([]), exp) def test_mode_single(self): # GH 15714 exp_single = [1] data_single = [1] exp_multi = [1] data_multi = [1, 1] for dt in np.typecodes['AllInteger'] + np.typecodes['Float']: s = Series(data_single, dtype=dt) exp = Series(exp_single, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) s = Series(data_multi, dtype=dt) exp = Series(exp_multi, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) exp = Series([1], dtype=np.int) tm.assert_series_equal(algos.mode([1]), exp) exp = Series(['a', 'b', 'c'], dtype=np.object) tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp) def test_number_mode(self): exp_single = [1] data_single = [1] * 5 + [2] * 3 exp_multi = [1, 3] data_multi = [1] * 5 + [2] * 3 + [3] * 5 for dt in np.typecodes['AllInteger'] + np.typecodes['Float']: s = Series(data_single, dtype=dt) exp = Series(exp_single, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) s = Series(data_multi, dtype=dt) exp = Series(exp_multi, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) def test_strobj_mode(self): exp = ['b'] data = ['a'] * 2 + ['b'] * 3 s = Series(data, dtype='c') exp = Series(exp, dtype='c') tm.assert_series_equal(algos.mode(s), exp) exp = ['bar'] data = ['foo'] * 2 + ['bar'] * 3 for dt in [str, object]: s = Series(data, dtype=dt) exp = Series(exp, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) def test_datelike_mode(self): exp = Series(['1900-05-03', '2011-01-03', '2013-01-02'], dtype="M8[ns]") s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]') tm.assert_series_equal(algos.mode(s), exp) exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]') s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03', '2013-01-02'], dtype='M8[ns]') tm.assert_series_equal(algos.mode(s), exp) def test_timedelta_mode(self): exp = Series(['-1 days', '0 days', '1 days'], dtype='timedelta64[ns]') s = Series(['1 days', '-1 days', '0 days'], dtype='timedelta64[ns]') tm.assert_series_equal(algos.mode(s), exp) exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]') s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min', '2 min', '2 min'], dtype='timedelta64[ns]') tm.assert_series_equal(algos.mode(s), exp) def test_mixed_dtype(self): exp = Series(['foo']) s = Series([1, 'foo', 'foo']) tm.assert_series_equal(algos.mode(s), exp) def test_uint64_overflow(self): exp = Series([2**63], dtype=np.uint64) s = Series([1, 2**63, 2**63], dtype=np.uint64) tm.assert_series_equal(algos.mode(s), exp) exp = Series([1, 2**63], dtype=np.uint64) s = Series([1, 2**63], dtype=np.uint64) tm.assert_series_equal(algos.mode(s), exp) def test_categorical(self): c = Categorical([1, 2]) exp = c tm.assert_categorical_equal(algos.mode(c), exp) tm.assert_categorical_equal(c.mode(), exp) c = Categorical([1, 'a', 'a']) exp = Categorical(['a'], categories=[1, 'a']) tm.assert_categorical_equal(algos.mode(c), exp) tm.assert_categorical_equal(c.mode(), exp) c = Categorical([1, 1, 2, 3, 3]) exp = Categorical([1, 3], categories=[1, 2, 3]) tm.assert_categorical_equal(algos.mode(c), exp) tm.assert_categorical_equal(c.mode(), exp) def test_index(self): idx = Index([1, 2, 
3]) exp = Series([1, 2, 3], dtype=np.int64) tm.assert_series_equal(algos.mode(idx), exp) idx = Index([1, 'a', 'a']) exp = Series(['a'], dtype=object) tm.assert_series_equal(algos.mode(idx), exp) idx = Index([1, 1, 2, 3, 3]) exp = Series([1, 3], dtype=np.int64) tm.assert_series_equal(algos.mode(idx), exp) exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]') idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min', '2 min', '2 min'], dtype='timedelta64[ns]') tm.assert_series_equal(algos.mode(idx), exp)
bsd-3-clause
2,409,841,351,250,474,000
37.554846
80
0.560575
false
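The test module in the row above centres on pandas treating every NaN bit pattern as equivalent inside its hash-table based algorithms (pd.unique, algos.isin). A minimal illustrative sketch of that behaviour, assuming a recent pandas and numpy install (this snippet is not part of the row above):

import struct
import numpy as np
import pandas as pd

# Two NaNs built from different bit patterns; each compares unequal to itself.
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]

arr = np.array([NAN1, NAN2], dtype=np.float64)
print(pd.unique(arr))                   # a single NaN: both bit patterns land in one hash bucket
print(pd.Series([NAN1]).isin([NAN2]))   # True: isin() also treats the two NaNs as equivalent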
grapesmoker/nba
Player.py
1
18520
from __future__ import division import datetime as dt import numpy as np from settings import players from utils import compute_ts_length from drawing.player_shot_charts import create_shot_chart from Boxscore import PlayerBoxscore class Player: _coll = players def __init__(self, player_id): self._coll = self.__class__._coll self._player = self._coll.find_one({'id': player_id}) try: self._first_name = self._player['firstName'] self._last_name = self._player['lastName'] self._id = self._player['id'] self._add_custom_fields() except Exception as ex: self._id = None self._first_name = None self.last_name = None def _add_custom_fields(self): if 'timeOnCourt' not in self._player: self._coll.update_one({'id': self.id}, {'$set': {'timeOnCourt': []}}) self._player['timeOnCourt'] = [] @property def first_name(self): return self._first_name @property def last_name(self): return self._last_name @property def id(self): return self._id def __str__(self): return '{0} {1}'.format(self._first_name, self._last_name) def __repr__(self): return self.__str__() def __cmp__(self, other): if self.id == other.id and self.first_name == other.first_name and self.last_name == other.last_name: return 0 elif self.last_name < other.last_name: return -1 elif self.last_name == other.last_name and self.first_name < self.last_name: return -1 elif self.last_name == other.last_name and self.first_name == other.last_name: return self.id < other.id else: return 1 def __hash__(self): return hash('{}{}{}'.format(self.first_name, self.last_name, self._id)) def check_time_consistency(self, times_subbed_in, times_subbed_out): consistent = True if len(times_subbed_in) == len(times_subbed_out) or len(times_subbed_in) == len(times_subbed_out) + 1: correct = True for to in times_subbed_out: for i, ti in enumerate(times_subbed_in[:-1]): ti_next = times_subbed_in[i + 1] if not (to < ti and to >= ti_next): correct = False consistent = correct else: consistent = False return consistent def time_on_court_bad(self, game): plays_subbed_in = [event for event in game.events if event.play_text.find('Substitution:') > -1 and event.players[0] == self] plays_subbed_out = [event for event in game.events if event.play_text.find('Substitution:') > -1 and event.players[1] == self] times_subbed_in = [event.play_time for event in plays_subbed_in] times_subbed_out = [event.play_time for event in plays_subbed_out] q_starters = game.quarter_starters() q2 = dt.timedelta(minutes=36) q3 = dt.timedelta(minutes=24) q4 = dt.timedelta(minutes=12) q_end_times = [q2, q3, q4] for q, starters in q_starters.items(): q_start_time = dt.timedelta(minutes=((5 - q) * 12)) q_end_time = dt.timedelta(minutes=((4 - q) * 12)) if self in starters and q > 1: last_sub_in = sorted([t for t in times_subbed_in if t > q_start_time], reverse=True) last_sub_out = sorted([t for t in times_subbed_out if t > q_start_time], reverse=True) print 'q: {}, last sub in: {}, last sub out: {}'.format(q, str(last_sub_in), str(last_sub_out)) if last_sub_in != [] and last_sub_out != []: last_sub_in, last_sub_out = last_sub_in[-1], last_sub_out[-1] if last_sub_in > last_sub_out: times_subbed_in.append(q_start_time) if last_sub_in < last_sub_out: times_subbed_out.append(q_end_time) elif last_sub_in == []: times_subbed_in.append(q_start_time) elif self not in starters and q > 1: last_sub_in = sorted([t for t in times_subbed_in if t > q_start_time], reverse=True) last_sub_out = sorted([t for t in times_subbed_out if t > q_start_time], reverse=True) print 'q: {}, last sub in: {}, last sub out: {}'.format(q, 
str(last_sub_in), str(last_sub_out)) elif self in starters and q == 1: times_subbed_in.append(q_start_time) times_subbed_in = sorted(times_subbed_in, reverse=True) print 'in/out:', map(str, times_subbed_in), map(str, times_subbed_out) i = 0 while not self.check_time_consistency(times_subbed_in, times_subbed_out) and i < len(times_subbed_in): ti = times_subbed_in[i] if i + 1 < len(times_subbed_in): ti_next = times_subbed_in[i + 1] else: ti_next = dt.timedelta(minutes=0) to_arr = [to for to in times_subbed_out if ti_next < to < ti] if len(to_arr) == 0: if q2 < ti and q2 >= ti_next: times_subbed_out.append(q2) elif q3 < ti and q3 >= ti_next: times_subbed_out.append(q3) elif q4 < ti and q4 >= ti_next: times_subbed_out.append(q4) times_subbed_out = sorted(times_subbed_out, reverse=True) i += 1 if len(times_subbed_out) == len(times_subbed_in) - 1: times_subbed_out.append(dt.timedelta(minutes=0)) time_stream = zip(times_subbed_in, times_subbed_out) return time_stream def time_on_court(self, game, recompute=False): empty_ts = False timestream = [] if 'timeOnCourt' in self._player and not recompute: #print 'cached timestreams found' for item in self._player['timeOnCourt']: if item != [] and item['gameId'] == game.id: #print 'retrieving cached timestream for {}'.format(self) for t in item['times']: timestream.append((dt.timedelta(seconds=t['start']), dt.timedelta(seconds=t['end']))) if timestream == []: empty_ts = True if 'timeOnCourt' not in self._player or empty_ts or recompute: if recompute: self._coll.update({'id': self.id}, {'$pull': {'timeOnCourt': {'gameId': game.id}}}) #print 'computing timestream for {}'.format(self) periods = game.periods quarter_starters = game.quarter_starters() quarter_enders = game.quarter_enders() times_subbed_in = [] times_subbed_out = [] for q in range(1, periods + 1): if q < 5: q_start_time = dt.timedelta(minutes=(q - 1) * 12) q_end_time = dt.timedelta(minutes=q * 12) else: q_start_time = dt.timedelta(minutes=48 + (q - 5) * 5) q_end_time = dt.timedelta(minutes=48 + ((q - 4) * 5)) if self in quarter_starters[q]: times_subbed_in.append(q_start_time) quarter_plays = sorted([ev for ev in game.events if ev.period == q]) times_subbed_in += [event.play_time for event in quarter_plays if event.is_substitution and event.players[0] == self] times_subbed_out += [event.play_time for event in quarter_plays if event.is_substitution and event.players[1] == self] if self in quarter_enders[q]: times_subbed_out.append(q_end_time) #print map(str, times_subbed_in) #print map(str, times_subbed_out) timestream = zip(times_subbed_in, times_subbed_out) #print map(str, times_subbed_in) #print map(str, times_subbed_out) time_data = [{'start': interval[0].seconds, 'end': interval[1].seconds} for interval in timestream] time_on_court = {'gameId': game.id, 'times': time_data} self._coll.update_one({'id': self.id}, {'$addToSet': {'timeOnCourt': time_on_court}}) self._player['timeOnCourt'].append(time_on_court) return timestream def save_timestream(self, game, timestream): self._coll.update({'id': self.id}, {'$pull': {'timeOnCourt': {'gameId': game.id}}}) time_data = [{'start': interval[0].seconds, 'end': interval[1].seconds} for interval in timestream] time_on_court = {'gameId': game.id, 'times': time_data} self._coll.update_one({'id': self.id}, {'$addToSet': {'timeOnCourt': time_on_court}}) def time_played(self, game, unit='seconds'): return compute_ts_length(self.time_on_court(game), unit=unit) def subbed_in_at_quarter(self, game): pass def minutes_played(self, game): box_score = 
PlayerBoxscore(game.player_boxscore(self)) return box_score.total_seconds_played / 60.0 def made_shots(self, game): return [event for event in game.events if event.is_field_goal_made and event.players[0] == self] def missed_shots(self, game): return [event for event in game.events if event.is_field_goal_missed and event.players[0] == self] def shot_chart(self, game, **kwargs): made_shots = self.made_shots(game) missed_shots = self.missed_shots(game) if 'plot_type' in kwargs: plot_type = kwargs['plot_type'] else: plot_type = 'hexbin' if 'hex_size' in kwargs: hex_size = kwargs['hex_size'] else: hex_size = 1 if 'overplot_shots' in kwargs: overplot_shots = kwargs['overplot_shots'] else: overplot_shots = False gd = game.date team1_name = game.home_team.nickname team2_name = game.away_team.nickname first_name, last_name = self.first_name, self.last_name create_shot_chart(made_shots, missed_shots, 'plots/players/{}_{}_shots_{}_{}_vs_{}.pdf'.format(first_name, last_name, gd, team1_name, team2_name), '{} {} on {} - {} vs {}'.format(first_name, last_name, gd, team1_name, team2_name), plot_type=plot_type, hex_size=hex_size, overplot_shots=overplot_shots) def multi_game_shot_chart(self, games, **kwargs): made_shots = [] missed_shots = [] start_date = None end_date = None for game in games: if game.player_in_game(self): if not start_date: start_date = game.date end_date = game.date made_shots = np.concatenate((made_shots, self.made_shots(game))) missed_shots = np.concatenate((missed_shots, self.missed_shots(game))) if 'plot_type' in kwargs: plot_type = kwargs['plot_type'] else: plot_type = 'hexbin' if 'hex_size' in kwargs: hex_size = kwargs['hex_size'] else: hex_size = 1 if 'overplot_shots' in kwargs: overplot_shots = kwargs['overplot_shots'] else: overplot_shots = False first_name, last_name = self.first_name, self.last_name create_shot_chart(made_shots, missed_shots, 'plots/players/{}_{}_shots_from_{}_to_{}.pdf'.format(first_name, last_name, start_date, end_date), '{} {} from {} to {}'.format(first_name, last_name, start_date, end_date), plot_type=plot_type, hex_size=hex_size, overplot_shots=overplot_shots) def plot_cumul_charts(player_id, hex_sizes, output_types): for hex_size in hex_sizes: for output_type in output_types: cumul_team_shot_chart_with_player(player_id, hex_size=hex_size, output_type=output_type, scale_factor=128) cumul_team_shot_chart_without_player(player_id, hex_size=hex_size, output_type=output_type, scale_factor=128) cumul_opp_shot_chart_with_player(player_id, hex_size=hex_size, output_type=output_type, scale_factor=128) cumul_opp_shot_chart_without_player(player_id, hex_size=hex_size, output_type=output_type, scale_factor=128) def drtg(self, game): box_score = game.player_boxscore(self) team = game.player_team(self) opp = game.opponent(team) team_stats = team.stats(game)['teamStats'] opp_stats = opp.stats(game)['teamStats'] drb = box_score['rebounds']['defensive'] mp = box_score['totalSecondsPlayed'] / 60.0 stl = box_score['steals'] blk = box_score['blockedShots'] pf = box_score['personalFouls'] team_mp = team_stats['minutes'] team_drb = team_stats['rebounds']['defensive'] team_blk = team_stats['blockedShots'] team_stl = team_stats['steals'] team_pf = team_stats['personalFouls'] team_pos = team.possessions(game) team_drtg = team.drtg(game) opp_orb = opp_stats['rebounds']['offensive'] opp_fga = opp_stats['fieldGoals']['attempted'] opp_fgm = opp_stats['fieldGoals']['made'] opp_tov = opp_stats['turnovers']['total'] opp_ftm = opp_stats['freeThrows']['made'] opp_fta = 
opp_stats['freeThrows']['attempted'] opp_mp = opp_stats['minutes'] opp_pts = opp_stats['points'] dfg_pct = opp_fgm / opp_fga dor_pct = opp_orb / (team_drb + opp_orb) fmwt = (dfg_pct * (1 - dor_pct)) / (dfg_pct * (1 - dor_pct) + (1 - dfg_pct) * dor_pct) stops1 = stl + blk * fmwt * (1 - 1.07 * dor_pct) + drb * (1 - fmwt) stops2 = (((opp_fga - opp_fgm - team_blk) / team_mp) * fmwt * (1 - 1.07 * dor_pct) + ((opp_tov - team_stl) / team_mp)) * mp + \ (pf / team_pf) * 0.4 * opp_fta * (1 - (opp_ftm / opp_fta))**2 stops_tot = stops1 + stops2 stop_pct = (stops_tot * opp_mp) / (team_pos * mp) d_pts_per_scrposs = opp_pts / (opp_fgm + (1 - (1 - (opp_ftm / opp_fta))**2) * opp_fta * 0.4) drtg = team_drtg + 0.2 * (100 * d_pts_per_scrposs * (1 - stop_pct) - team_drtg) return drtg def ortg(self, game): box_score = game.player_boxscore(self) team = game.player_team(self) opp = game.opponent(team) team_stats = team.stats(game)['teamStats'] opp_stats = opp.stats(game)['teamStats'] ast = box_score['assists'] fgm = box_score['fieldGoals']['made'] fga = box_score['fieldGoals']['attempted'] ftm = box_score['freeThrows']['made'] fta = box_score['freeThrows']['attempted'] tov = box_score['turnovers'] threes = box_score['threePointFieldGoals']['made'] orb = box_score['rebounds']['offensive'] pts = box_score['points'] mp = box_score['totalSecondsPlayed']/ 60.0 team_fgm = team_stats['fieldGoals']['made'] team_fga = team_stats['fieldGoals']['attempted'] team_ast = team_stats['assists'] team_mp = team_stats['minutes'] team_ftm = team_stats['freeThrows']['made'] team_fta = team_stats['freeThrows']['attempted'] team_orb = team_stats['rebounds']['offensive'] team_pts = team_stats['points'] team_3pm = team_stats['threePointFieldGoals']['made'] team_tov = team_stats['turnovers']['total'] opp_drb = opp_stats['rebounds']['defensive'] team_orb_pct = team_orb / (opp_drb + team_orb) ft_part = (1 - (1 - (ftm / fta))**2) * 0.4 * fta ast_part = 0.5 * (((team_pts - team_ftm) - (pts - ftm)) / (2 * (team_fga - fga))) * ast q_ast = ((mp / (team_mp / 5)) * (1.14 * ((team_ast - ast) / team_fgm))) + ((((team_ast / team_mp) * mp * 5 - ast) / ((team_fgm / team_mp) * mp * 5 - fgm)) * (1 - (mp / (team_mp / 5)))) fg_part = fgm * (1 - 0.5 * ((pts - ftm) / (2 * fga)) * q_ast) team_scoring_poss = team_fgm + (1 - (1 - (team_ftm / team_fta))**2) * team_fta * 0.4 team_play_pct = team_scoring_poss / (team_fga + team_fta * 0.4 + team_tov) team_orb_weight = ((1 - team_orb_pct) * team_play_pct) / ((1 - team_orb_pct) * team_play_pct + team_orb_pct * (1 - team_play_pct)) orb_part = orb * team_orb_weight * team_play_pct scr_poss = (fg_part + ast_part + ft_part) * (1 - (team_orb / team_scoring_poss) * team_orb_weight * team_play_pct) + orb_part fg_x_poss = (fga - fgm) * (1 - 1.07 * team_orb_pct) ft_x_poss = ((1 - (ftm / fta))**2) * 0.4 * fta tot_poss = scr_poss + fg_x_poss + ft_x_poss + tov pprod_fg_part = 2 * (fgm + 0.5 * threes) * (1 - 0.5 * ((pts - ftm) / (2 * fga)) * q_ast) pprod_ast_part = 2 * ((team_fgm - fgm + 0.5 * (team_3pm - threes)) / (team_fgm - fgm)) * 0.5 * (((team_pts - team_ftm) - (pts - ftm)) / (2 * (team_fga - fga))) * ast pprod_orb_part = orb * team_orb_weight * team_play_pct * (team_pts / (team_fgm + (1 - (1 - (team_ftm / team_fta))**2) * 0.4 * team_fta)) pprod = (pprod_fg_part + pprod_ast_part + ftm) * (1 - (team_orb / team_scoring_poss) * team_orb_weight * team_play_pct) + pprod_orb_part ortg = 100 * pprod / tot_poss return ortg def usage(self, game): box_score = game.player_boxscore(self) team = game.player_team(self) team_stats = 
team.stats(game)['teamStats'] fga = box_score['fieldGoals']['attempted'] fta = box_score['freeThrows']['attempted'] tov = box_score['turnovers'] mp = box_score['totalSecondsPlayed'] / 60.0 team_fga = team_stats['fieldGoals']['attempted'] team_fta = team_stats['freeThrows']['attempted'] team_tov = team_stats['turnovers']['total'] team_mp = team_stats['minutes'] usg = 100 * ((fga + 0.44 * fta + tov) * (team_mp / 5)) / (mp * (team_fga + 0.44 * team_fta + team_tov)) return usg
gpl-2.0
-1,942,377,355,209,790,200
37.746862
192
0.539741
false
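Player.usage() in the row above implements the standard usage-rate formula from box-score totals. A standalone sketch of the same arithmetic with the values passed in directly (the function and argument names here are illustrative, not taken from the repository):

def usage_rate(fga, fta, tov, mp, team_fga, team_fta, team_tov, team_mp):
    # USG% = 100 * ((FGA + 0.44*FTA + TOV) * (team MP / 5))
    #        / (MP * (team FGA + 0.44*team FTA + team TOV))
    return 100 * ((fga + 0.44 * fta + tov) * (team_mp / 5)) / (
        mp * (team_fga + 0.44 * team_fta + team_tov))

# Example: 18 FGA, 6 FTA, 3 TOV in 36 minutes for a team with 85 FGA, 22 FTA,
# 14 TOV over 240 team minutes -> about a 29% usage rate.
print(usage_rate(18, 6, 3, 36.0, 85, 22, 14, 240.0))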
lbartoletti/QGIS
python/plugins/processing/algs/qgis/VectorLayerScatterplot3D.py
30
4715
# -*- coding: utf-8 -*- """ *************************************************************************** EquivalentNumField.py --------------------- Date : January 2013 Copyright : (C) 2013 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'January 2013' __copyright__ = '(C) 2013, Victor Olaya' import warnings from qgis.core import (QgsProcessingParameterFeatureSource, QgsProcessingParameterField, QgsProcessingParameterFileDestination, QgsProcessingException) from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm from processing.tools import vector from qgis.PyQt.QtCore import QCoreApplication class VectorLayerScatterplot3D(QgisAlgorithm): INPUT = 'INPUT' OUTPUT = 'OUTPUT' XFIELD = 'XFIELD' YFIELD = 'YFIELD' ZFIELD = 'ZFIELD' def group(self): return self.tr('Plots') def groupId(self): return 'plots' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterField(self.XFIELD, self.tr('X attribute'), parentLayerParameterName=self.INPUT, type=QgsProcessingParameterField.Numeric)) self.addParameter(QgsProcessingParameterField(self.YFIELD, self.tr('Y attribute'), parentLayerParameterName=self.INPUT, type=QgsProcessingParameterField.Numeric)) self.addParameter(QgsProcessingParameterField(self.ZFIELD, self.tr('Z attribute'), parentLayerParameterName=self.INPUT, type=QgsProcessingParameterField.Numeric)) self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT, self.tr('Histogram'), self.tr('HTML files (*.html)'))) def name(self): return 'scatter3dplot' def displayName(self): return self.tr('Vector layer scatterplot 3D') def processAlgorithm(self, parameters, context, feedback): try: # importing plotly throws Python warnings from within the library - filter these out with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=ResourceWarning) warnings.filterwarnings("ignore", category=ImportWarning) import plotly as plt import plotly.graph_objs as go except ImportError: raise QgsProcessingException(QCoreApplication.translate('VectorLayerScatterplot3D', 'This algorithm requires the Python “plotly” library. Please install this library and try again.')) source = self.parameterAsSource(parameters, self.INPUT, context) if source is None: raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT)) xfieldname = self.parameterAsString(parameters, self.XFIELD, context) yfieldname = self.parameterAsString(parameters, self.YFIELD, context) zfieldname = self.parameterAsString(parameters, self.ZFIELD, context) output = self.parameterAsFileOutput(parameters, self.OUTPUT, context) values = vector.values(source, xfieldname, yfieldname, zfieldname) data = [go.Scatter3d( x=values[xfieldname], y=values[yfieldname], z=values[zfieldname], mode='markers')] plt.offline.plot(data, filename=output, auto_open=False) return {self.OUTPUT: output}
gpl-2.0
1,202,972,566,750,024,400
43.028037
195
0.536616
false
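The QGIS algorithm in the row above is normally driven through the Processing framework rather than instantiated directly. A hedged usage sketch from the QGIS Python console: the provider-qualified id 'qgis:scatter3dplot' is assumed from name() above, and the layer path, field names and output path are placeholders.

import processing

result = processing.run("qgis:scatter3dplot", {
    'INPUT': '/path/to/points.gpkg',   # any vector layer with numeric attributes
    'XFIELD': 'x',
    'YFIELD': 'y',
    'ZFIELD': 'z',
    'OUTPUT': '/tmp/scatter3d.html',   # plotly writes an interactive HTML file here
})
print(result['OUTPUT'])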
grhawk/ASE
ase/optimize/minimahopping.py
2
29113
import os import numpy as np from ase import io, units from ase.optimize import QuasiNewton from ase.parallel import paropen, rank, world from ase.md import VelocityVerlet from ase.md import MDLogger from ase.md.velocitydistribution import MaxwellBoltzmannDistribution class MinimaHopping: """Implements the minima hopping method of global optimization outlined by S. Goedecker, J. Chem. Phys. 120: 9911 (2004). Initialize with an ASE atoms object. Optional parameters are fed through keywords. To run multiple searches in parallel, specify the minima_traj keyword, and have each run point to the same path. """ _default_settings = { 'T0': 1000., # K, initial MD 'temperature' 'beta1': 1.1, # temperature adjustment parameter 'beta2': 1.1, # temperature adjustment parameter 'beta3': 1. / 1.1, # temperature adjustment parameter 'Ediff0': 0.5, # eV, initial energy acceptance threshold 'alpha1': 0.98, # energy threshold adjustment parameter 'alpha2': 1. / 0.98, # energy threshold adjustment parameter 'mdmin': 2, # criteria to stop MD simulation (no. of minima) 'logfile': 'hop.log', # text log 'minima_threshold': 0.5, # A, threshold for identical configs 'timestep': 1.0, # fs, timestep for MD simulations 'optimizer': QuasiNewton, # local optimizer to use 'minima_traj': 'minima.traj', # storage file for minima list 'fmax': 0.05, # eV/A, max force for optimizations } def __init__(self, atoms, **kwargs): """Initialize with an ASE atoms object and keyword arguments.""" self._atoms = atoms for key in kwargs: if not key in self._default_settings: raise RuntimeError('Unknown keyword: %s' % key) for k, v in self._default_settings.items(): setattr(self, '_%s' % k, kwargs.pop(k, v)) self._passedminimum = PassedMinimum() # when a MD sim. has passed # a local minimum # Misc storage. self._previous_optimum = None self._previous_energy = None self._temperature = self._T0 self._Ediff = self._Ediff0 def __call__(self, totalsteps=None): """Run the minima hopping algorithm. The total number of steps can be specified, other wise runs indefinitely (or until stopped by batching software).""" self._startup() while True: if (totalsteps and self._counter >= totalsteps): self._log('msg', 'Run terminated. Step #%i reached of ' '%i allowed. Increase totalsteps if resuming.' % (self._counter, totalsteps)) return self._previous_optimum = self._atoms.copy() self._previous_energy = self._atoms.get_potential_energy() self._molecular_dynamics() self._optimize() self._counter += 1 self._check_results() def _startup(self): """Initiates a run, and determines if running from previous data or a fresh run.""" status = np.array(-1.) exists = self._read_minima() if rank == 0: if not exists: # Fresh run with new minima file. status = np.array(0.) elif not os.path.exists(self._logfile): # Fresh run with existing or shared minima file. status = np.array(1.) else: # Must be resuming from within a working directory. status = np.array(2.) world.barrier() world.broadcast(status, 0) if status == 2.: self._resume() else: self._counter = 0 self._log('init') self._log('msg', 'Performing initial optimization.') if status == 1.: self._log('msg', 'Using existing minima file with %i prior ' 'minima: %s' % (len(self._minima), self._minima_traj)) self._optimize() self._check_results() self._counter += 1 def _resume(self): """Attempt to resume a run, based on information in the log file. 
Note it will almost always be interrupted in the middle of either a qn or md run or when exceeding totalsteps, so it only has been tested in those cases currently.""" f = paropen(self._logfile, 'r') lines = f.read().splitlines() f.close() self._log('msg', 'Attempting to resume stopped run.') self._log('msg', 'Using existing minima file with %i prior ' 'minima: %s' % (len(self._minima), self._minima_traj)) mdcount, qncount = 0, 0 for line in lines: if (line[:4] == 'par:') and ('Ediff' not in line): self._temperature = eval(line.split()[1]) self._Ediff = eval(line.split()[2]) elif line[:18] == 'msg: Optimization:': qncount = int(line[19:].split('qn')[1]) elif line[:24] == 'msg: Molecular dynamics:': mdcount = int(line[25:].split('md')[1]) self._counter = max((mdcount, qncount)) if qncount == mdcount: # Either stopped during local optimization or terminated due to # max steps. self._log('msg', 'Attempting to resume at qn%05i' % qncount) if qncount > 0: atoms = io.read('qn%05i.traj' % (qncount - 1), index=-1) self._previous_optimum = atoms.copy() self._previous_energy = atoms.get_potential_energy() if os.path.getsize('qn%05i.traj' % qncount) > 0: atoms = io.read('qn%05i.traj' % qncount, index=-1) else: atoms = io.read('md%05i.traj' % qncount, index=-3) self._atoms.positions = atoms.get_positions() fmax = np.sqrt((atoms.get_forces() ** 2).sum(axis=1).max()) if fmax < self._fmax: # Stopped after a qn finished. self._log('msg', 'qn%05i fmax already less than fmax=%.3f' % (qncount, self._fmax)) self._counter += 1 return self._optimize() self._counter += 1 if qncount > 0: self._check_results() else: self._record_minimum() self._log('msg', 'Found a new minimum.') self._log('msg', 'Accepted new minimum.') self._log('par') elif qncount < mdcount: # Probably stopped during molecular dynamics. self._log('msg', 'Attempting to resume at md%05i.' % mdcount) atoms = io.read('qn%05i.traj' % qncount, index=-1) self._previous_optimum = atoms.copy() self._previous_energy = atoms.get_potential_energy() self._molecular_dynamics(resume=mdcount) self._optimize() self._counter += 1 self._check_results() def _check_results(self): """Adjusts parameters and positions based on outputs.""" # No prior minima found? self._read_minima() if len(self._minima) == 0: self._log('msg', 'Found a new minimum.') self._log('msg', 'Accepted new minimum.') self._record_minimum() self._log('par') return # Returned to starting position? if self._previous_optimum: compare = ComparePositions(translate=False) dmax = compare(self._atoms, self._previous_optimum) self._log('msg', 'Max distance to last minimum: %.3f A' % dmax) if dmax < self._minima_threshold: self._log('msg', 'Re-found last minimum.') self._temperature *= self._beta1 self._log('par') return # In a previously found position? unique, dmax_closest = self._unique_minimum_position() self._log('msg', 'Max distance to closest minimum: %.3f A' % dmax_closest) if not unique: self._temperature *= self._beta2 self._log('msg', 'Found previously found minimum.') self._log('par') if self._previous_optimum: self._log('msg', 'Restoring last minimum.') self._atoms.positions = self._previous_optimum.positions return # Must have found a unique minimum. self._temperature *= self._beta3 self._log('msg', 'Found a new minimum.') self._log('par') if (self._atoms.get_potential_energy() < self._previous_energy + self._Ediff): self._log('msg', 'Accepted new minimum.') self._Ediff *= self._alpha1 self._log('par') self._record_minimum() else: self._log('msg', 'Rejected new minimum due to energy. 
' 'Restoring last minimum.') self._atoms.positions = self._previous_optimum.positions self._Ediff *= self._alpha2 self._log('par') def _log(self, cat='msg', message=None): """Records the message as a line in the log file.""" if cat == 'init': if rank == 0: if os.path.exists(self._logfile): raise RuntimeError('File exists: %s' % self._logfile) f = paropen(self._logfile, 'w') f.write('par: %12s %12s %12s\n' % ('T (K)', 'Ediff (eV)', 'mdmin')) f.write('ene: %12s %12s %12s\n' % ('E_current', 'E_previous', 'Difference')) f.close() return f = paropen(self._logfile, 'a') if cat == 'msg': line = 'msg: %s' % message elif cat == 'par': line = ('par: %12.4f %12.4f %12i' % (self._temperature, self._Ediff, self._mdmin)) elif cat == 'ene': current = self._atoms.get_potential_energy() if self._previous_optimum: previous = self._previous_energy line = ('ene: %12.5f %12.5f %12.5f' % (current, previous, current - previous)) else: line = ('ene: %12.5f' % current) f.write(line + '\n') f.close() def _optimize(self): """Perform an optimization.""" self._atoms.set_momenta(np.zeros(self._atoms.get_momenta().shape)) opt = self._optimizer(self._atoms, trajectory='qn%05i.traj' % self._counter, logfile='qn%05i.log' % self._counter) self._log('msg', 'Optimization: qn%05i' % self._counter) opt.run(fmax=self._fmax) self._log('ene') def _record_minimum(self): """Adds the current atoms configuration to the minima list.""" traj = io.PickleTrajectory(self._minima_traj, 'a') traj.write(self._atoms) self._read_minima() self._log('msg', 'Recorded minima #%i.' % (len(self._minima) - 1)) def _read_minima(self): """Reads in the list of minima from the minima file.""" exists = os.path.exists(self._minima_traj) if exists: empty = os.path.getsize(self._minima_traj) == 0 if os.path.exists(self._minima_traj): if not empty: traj = io.PickleTrajectory(self._minima_traj, 'r') self._minima = [atoms for atoms in traj] else: self._minima = [] return True else: self._minima = [] return False def _molecular_dynamics(self, resume=None): """Performs a molecular dynamics simulation, until mdmin is exceeded. If resuming, the file number (md%05i) is expected.""" self._log('msg', 'Molecular dynamics: md%05i' % self._counter) mincount = 0 energies, oldpositions = [], [] thermalized = False if resume: self._log('msg', 'Resuming MD from md%05i.traj' % resume) if os.path.getsize('md%05i.traj' % resume) == 0: self._log('msg', 'md%05i.traj is empty. Resuming from ' 'qn%05i.traj.' % (resume, resume - 1)) atoms = io.read('qn%05i.traj' % (resume - 1), index=-1) else: images = io.PickleTrajectory('md%05i.traj' % resume, 'r') for atoms in images: energies.append(atoms.get_potential_energy()) oldpositions.append(atoms.positions.copy()) passedmin = self._passedminimum(energies) if passedmin: mincount += 1 self._atoms.set_momenta(atoms.get_momenta()) thermalized = True self._atoms.positions = atoms.get_positions() self._log('msg', 'Starting MD with %i existing energies.' 
% len(energies)) if not thermalized: MaxwellBoltzmannDistribution(self._atoms, temp=self._temperature * units.kB, force_temp=True) traj = io.PickleTrajectory('md%05i.traj' % self._counter, 'a', self._atoms) dyn = VelocityVerlet(self._atoms, dt=self._timestep * units.fs) log = MDLogger(dyn, self._atoms, 'md%05i.log' % self._counter, header=True, stress=False, peratom=False) dyn.attach(log, interval=1) dyn.attach(traj, interval=1) while mincount < self._mdmin: dyn.run(1) energies.append(self._atoms.get_potential_energy()) passedmin = self._passedminimum(energies) if passedmin: mincount += 1 oldpositions.append(self._atoms.positions.copy()) # Reset atoms to minimum point. self._atoms.positions = oldpositions[passedmin[0]] def _unique_minimum_position(self): """Identifies if the current position of the atoms, which should be a local minima, has been found before.""" unique = True dmax_closest = 99999. compare = ComparePositions(translate=True) self._read_minima() for minimum in self._minima: dmax = compare(minimum, self._atoms) if dmax < self._minima_threshold: unique = False if dmax < dmax_closest: dmax_closest = dmax return unique, dmax_closest class ComparePositions: """Class that compares the atomic positions between two ASE atoms objects. Returns the maximum distance that any atom has moved, assuming all atoms of the same element are indistinguishable. If translate is set to True, allows for arbitrary translations within the unit cell, as well as translations across any periodic boundary conditions. When called, returns the maximum displacement of any one atom.""" def __init__(self, translate=True): self._translate = translate def __call__(self, atoms1, atoms2): atoms1 = atoms1.copy() atoms2 = atoms2.copy() if not self._translate: dmax = self. _indistinguishable_compare(atoms1, atoms2) else: dmax = self._translated_compare(atoms1, atoms2) return dmax def _translated_compare(self, atoms1, atoms2): """Moves the atoms around and tries to pair up atoms, assuming any atoms with the same symbol are indistinguishable, and honors periodic boundary conditions (for example, so that an atom at (0.1, 0., 0.) correctly is found to be close to an atom at (7.9, 0., 0.) if the atoms are in an orthorhombic cell with x-dimension of 8. Returns dmax, the maximum distance between any two atoms in the optimal configuration.""" atoms1.set_constraint() atoms2.set_constraint() for index in range(3): assert atoms1.pbc[index] == atoms2.pbc[index] least = self._get_least_common(atoms1) indices1 = [atom.index for atom in atoms1 if atom.symbol == least[0]] indices2 = [atom.index for atom in atoms2 if atom.symbol == least[0]] # Make comparison sets from atoms2, which contain repeated atoms in # all pbc's and bring the atom listed in indices2 to (0,0,0) comparisons = [] repeat = [] for bc in atoms2.pbc: if bc == True: repeat.append(3) else: repeat.append(1) repeated = atoms2.repeat(repeat) moved_cell = atoms2.cell * atoms2.pbc for moved in moved_cell: repeated.translate(-moved) repeated.set_cell(atoms2.cell) for index in indices2: comparison = repeated.copy() comparison.translate(-atoms2[index].position) comparisons.append(comparison) # Bring the atom listed in indices1 to (0,0,0) [not whole list] standard = atoms1.copy() standard.translate(-atoms1[indices1[0]].position) # Compare the standard to the comparison sets. 
dmaxes = [] for comparison in comparisons: dmax = self._indistinguishable_compare(standard, comparison) dmaxes.append(dmax) return min(dmaxes) def _get_least_common(self, atoms): """Returns the least common element in atoms. If more than one, returns the first encountered.""" symbols = [atom.symbol for atom in atoms] least = ['', np.inf] for element in set(symbols): count = symbols.count(element) if symbols.count(element) < least[1]: least = [element, symbols.count(element)] return least def _indistinguishable_compare(self, atoms1, atoms2): """Finds each atom in atoms1's nearest neighbor with the same chemical symbol in atoms2. Return dmax, the farthest distance an individual atom differs by.""" atoms2 = atoms2.copy() # allow deletion atoms2.set_constraint() dmax = 0. for atom1 in atoms1: closest = [np.nan, np.inf] for index, atom2 in enumerate(atoms2): if atom2.symbol == atom1.symbol: d = np.linalg.norm(atom1.position - atom2.position) if d < closest[1]: closest = [index, d] if closest[1] > dmax: dmax = closest[1] del atoms2[closest[0]] return dmax class PassedMinimum: """Simple routine to find if a minimum in the potential energy surface has been passed. In its default settings, a minimum is found if the sequence ends with two downward points followed by two upward points. Initialize with n_down and n_up, integer values of the number of up and down points. If it has successfully determined it passed a minimum, it returns the value (energy) of that minimum and the number of positions back it occurred, otherwise returns None.""" def __init__(self, n_down=2, n_up=2): self._ndown = n_down self._nup = n_up def __call__(self, energies): if len(energies) < (self._nup + self._ndown + 1): return None status = True index = -1 for i_up in range(self._nup): if energies[index] < energies[index - 1]: status = False index -= 1 for i_down in range(self._ndown): if energies[index] > energies[index - 1]: status = False index -= 1 if status: return (-self._nup - 1), energies[-self._nup - 1] class MHPlot: """Makes a plot summarizing the output of the MH algorithm from the specified rundirectory. 
If no rundirectory is supplied, uses the current directory.""" def __init__(self, rundirectory=None, logname='hop.log'): if not rundirectory: rundirectory = os.getcwd() self._rundirectory = rundirectory self._logname = logname self._read_log() self._fig, self._ax = self._makecanvas() self._plot_data() def get_figure(self): """Returns the matplotlib figure object.""" return self._fig def save_figure(self, filename): """Saves the file to the specified path, with any allowed matplotlib extension (e.g., .pdf, .png, etc.).""" self._fig.savefig(filename) def _read_log(self): """Reads relevant parts of the log file.""" data = [] # format: [energy, status, temperature, ediff] f = open(os.path.join(self._rundirectory, self._logname), 'r') lines = f.read().splitlines() f.close() step_almost_over = False step_over = False for line in lines: if line.startswith('msg: Molecular dynamics:'): status = 'performing MD' elif line.startswith('msg: Optimization:'): status = 'performing QN' elif line.startswith('ene:'): status = 'local optimum reached' energy = floatornan(line.split()[1]) elif line.startswith('msg: Accepted new minimum.'): status = 'accepted' step_almost_over = True elif line.startswith('msg: Found previously found minimum.'): status = 'previously found minimum' step_almost_over = True elif line.startswith('msg: Re-found last minimum.'): status = 'previous minimum' step_almost_over = True elif line.startswith('msg: Rejected new minimum'): status = 'rejected' step_almost_over = True elif line.startswith('par: '): temperature = floatornan(line.split()[1]) ediff = floatornan(line.split()[2]) if step_almost_over: step_over = True step_almost_over = False if step_over: data.append([energy, status, temperature, ediff]) step_over = False if data[-1][1] != status: data.append([np.nan, status, temperature, ediff]) self._data = data def _makecanvas(self): from matplotlib import pyplot from matplotlib.ticker import ScalarFormatter fig = pyplot.figure(figsize=(6., 8.)) lm, rm, bm, tm = 0.22, 0.02, 0.05, 0.04 vg1 = 0.01 # between adjacent energy plots vg2 = 0.03 # between different types of plots ratio = 2. # size of an energy plot to a parameter plot figwidth = 1. - lm - rm totalfigheight = 1. - bm - tm - vg1 - 2. * vg2 parfigheight = totalfigheight / (2. * ratio + 2) epotheight = ratio * parfigheight ax1 = fig.add_axes((lm, bm, figwidth, epotheight)) ax2 = fig.add_axes((lm, bm + epotheight + vg1, figwidth, epotheight)) for ax in [ax1, ax2]: ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False)) ediffax = fig.add_axes((lm, bm + 2. * epotheight + vg1 + vg2, figwidth, parfigheight)) tempax = fig.add_axes((lm, (bm + 2 * epotheight + vg1 + 2 * vg2 + parfigheight), figwidth, parfigheight)) for ax in [ax2, tempax, ediffax]: ax.set_xticklabels([]) ax1.set_xlabel('step') tempax.set_ylabel('$T$, K') ediffax.set_ylabel('$E_\mathrm{diff}$, eV') for ax in [ax1, ax2]: ax.set_ylabel('$E_\mathrm{pot}$, eV') ax = CombinedAxis(ax1, ax2, tempax, ediffax) self._set_zoomed_range(ax) ax1.spines['top'].set_visible(False) ax2.spines['bottom'].set_visible(False) return fig, ax def _set_zoomed_range(self, ax): """Try to intelligently set the range for the zoomed-in part of the graph.""" energies = [line[0] for line in self._data if not np.isnan(line[0])] dr = max(energies) - min(energies) if dr == 0.: dr = 1. 
ax.set_ax1_range((min(energies) - 0.2 * dr, max(energies) + 0.2 * dr)) def _plot_data(self): for step, line in enumerate(self._data): self._plot_energy(step, line) self._plot_qn(step, line) self._plot_md(step, line) self._plot_parameters() self._ax.set_xlim(self._ax.ax1.get_xlim()) def _plot_energy(self, step, line): """Plots energy and annotation for acceptance.""" energy, status = line[0], line[1] if np.isnan(energy): return self._ax.plot([step, step + 0.5], [energy] * 2, '-', color='k', linewidth=2.) if status == 'accepted': self._ax.text(step + 0.51, energy, '$\checkmark$') elif status == 'rejected': self._ax.text(step + 0.51, energy, '$\Uparrow$', color='red') elif status == 'previously found minimum': self._ax.text(step + 0.51, energy, '$\hookleftarrow$', color='red', va='center') elif status == 'previous minimum': self._ax.text(step + 0.51, energy, '$\leftarrow$', color='red', va='center') def _plot_md(self, step, line): """Adds a curved plot of molecular dynamics trajectory.""" if step == 0: return energies = [self._data[step - 1][0]] file = os.path.join(self._rundirectory, 'md%05i.traj' % step) traj = io.PickleTrajectory(file, 'r') for atoms in traj: energies.append(atoms.get_potential_energy()) xi = step - 1 + .5 if len(energies) > 2: xf = xi + (step + 0.25 - xi) * len(energies) / (len(energies) - 2.) else: xf = step if xf > (step + .75): xf = step self._ax.plot(np.linspace(xi, xf, num=len(energies)), energies, '-k') def _plot_qn(self, index, line): """Plots a dashed vertical line for the optimization.""" if line[1] == 'performing MD': return file = os.path.join(self._rundirectory, 'qn%05i.traj' % index) if os.path.getsize(file) == 0: return traj = io.PickleTrajectory(file, 'r') energies = [traj[0].get_potential_energy(), traj[-1].get_potential_energy()] if index > 0: file = os.path.join(self._rundirectory, 'md%05i.traj' % index) atoms = io.read(file, index=-3) energies[0] = atoms.get_potential_energy() self._ax.plot([index + 0.25] * 2, energies, ':k') def _plot_parameters(self): """Adds a plot of temperature and Ediff to the plot.""" steps, Ts, ediffs = [], [], [] for step, line in enumerate(self._data): steps.extend([step + 0.5, step + 1.5]) Ts.extend([line[2]] * 2) ediffs.extend([line[3]] * 2) self._ax.tempax.plot(steps, Ts) self._ax.ediffax.plot(steps, ediffs) for ax in [self._ax.tempax, self._ax.ediffax]: ylim = ax.get_ylim() yrange = ylim[1] - ylim[0] ax.set_ylim((ylim[0] - 0.1 * yrange, ylim[1] + 0.1 * yrange)) def floatornan(value): """Converts the argument into a float if possible, np.nan if not.""" try: output = float(value) except ValueError: output = np.nan return output class CombinedAxis: """Helper class for MHPlot to plot on split y axis and adjust limits simultaneously.""" def __init__(self, ax1, ax2, tempax, ediffax): self.ax1 = ax1 self.ax2 = ax2 self.tempax = tempax self.ediffax = ediffax self._ymax = None def set_ax1_range(self, ylim): self._ax1_ylim = ylim self.ax1.set_ylim(ylim) def plot(self, *args, **kwargs): self.ax1.plot(*args, **kwargs) self.ax2.plot(*args, **kwargs) # Re-adjust yrange for yvalue in args[1]: if yvalue > self._ymax: self._ymax = yvalue self.ax1.set_ylim(self._ax1_ylim) self.ax2.set_ylim((self._ax1_ylim[1], self._ymax)) def set_xlim(self, *args): self.ax1.set_xlim(*args) self.ax2.set_xlim(*args) self.tempax.set_xlim(*args) self.ediffax.set_xlim(*args) def text(self, *args, **kwargs): y = args[1] if y < self._ax1_ylim[1]: ax = self.ax1 else: ax = self.ax2 ax.text(*args, **kwargs)
gpl-2.0
-9,189,799,374,051,500,000
40.829023
79
0.542266
false
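A minimal usage sketch for the MinimaHopping class in the record above. The four-atom argon cluster, the Lennard-Jones calculator, and the import path are illustrative assumptions, not taken from the source.

from ase import Atoms
from ase.calculators.lj import LennardJones
from ase.optimize.minimahopping import MinimaHopping  # import path assumed

# Hypothetical, slightly distorted 4-atom argon cluster.
atoms = Atoms('Ar4', positions=[(0.0, 0.0, 0.0),
                                (1.2, 0.0, 0.0),
                                (0.0, 1.2, 0.0),
                                (1.2, 1.2, 0.3)])
atoms.set_calculator(LennardJones())

# Keyword arguments map onto _default_settings; unknown keys raise RuntimeError.
hop = MinimaHopping(atoms,
                    T0=500.0,                  # K, initial MD temperature
                    Ediff0=0.3,                # eV, initial acceptance threshold
                    minima_traj='minima.traj')
hop(totalsteps=3)                              # stop after three hopping steps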
hronellenfitsch/nesting
cycle_basis.py
1
11955
#!/usr/bin/env python """ cycle_basis.py functions for calculating the cycle basis of a graph """ from numpy import * import networkx as nx import matplotlib import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.path import Path if matplotlib.__version__ >= '1.3.0': from matplotlib.path import Path else: from matplotlib import nxutils from itertools import chain from itertools import ifilterfalse from itertools import izip from itertools import tee from collections import defaultdict import time from helpers import * class Cycle(): """ Represents a set of nodes that make up a cycle in some graph. Is hashable and does not care about orientation or things like that, two cycles are equal if they share the same nodes. A cycle can be compared to a set or frozenset of nodes. path is a list of vertices describing a closed path in the cycle. if it is absent, a closed path will be calculated together with coordinates. coords is an array of x-y pairs representing the coordinates of the cycle path elements. """ def __init__(self, graph, edges, coords=None): """ Initializes the Cycle with an edge list representing the cycle. All edges should be ordered such that a cycle is represented as (1,2)(2,3)(3,4)...(n-2,n-1)(n-1,1) Parameters: graph: The underlying graph object edges: The edge list making up the cycle. is_ordered: If set to false, will use the neighborhood information from graph to construct ordered edge set from unordered one. In case the unordered edge set is not a connected graph, e.g. when removing one cycle splits the surrounding one in half, the smaller connected component in terms of total length is thrown away. Since our cycles are typically convex, this means we use the outermost component. """ self.graph = graph edges, self.total_area = self.ordered_edges(edges) self.path = zip(*edges)[0] if coords is None: self.coords = array([[graph.node[n]['x'], graph.node[n]['y']] for n in self.path]) else: self.coords = coords self.edges = edges # This allows comparisons self.edgeset = set([tuple(sorted(e)) for e in edges]) self.com = mean(self.coords, axis=0) # This frozenset is used to compare/hash cycles. self._nodeset = frozenset(self.path) def ordered_edges(self, edges): """ Uses the graph associated to this cycle to order the unordered edge set. Also return the area of the cycle. This is defined as max(Areas of individual connected components) - (Areas of other connected components) This assumes that the cycle is one large cycle containing one or more smaller cycles. 
""" # construct subgraph consisting of only the specified edges edge_graph = nx.Graph(edges) con = sorted_connected_components(edge_graph) # Calculate sorted edge list for each connected component # of the cycle component_sorted_edges = [] areas = [] G = self.graph for comp in con: # get ordered list of edges component_edges = comp.edges() n_edges = len(component_edges) sorted_edges = [] start = component_edges[0][0] cur = start prev = None for i in xrange(n_edges): nextn = [n for n in comp.neighbors(cur) if n != prev][0] sorted_edges.append((cur, nextn)) prev = cur cur = nextn # coordinates of path coords = array([(G.node[u]['x'], G.node[u]['y']) for u, v in sorted_edges] \ + [(G.node[sorted_edges[0][0]]['x'], G.node[sorted_edges[0][0]]['y'])]) areas.append(polygon_area(coords)) component_sorted_edges.append(sorted_edges) if len(areas) > 1: areas = sorted(areas, reverse=True) total_area = areas[0] - sum(areas[1:]) else: total_area = areas[0] return list(chain.from_iterable( sorted(component_sorted_edges, key=len, reverse=True))), \ total_area def intersection(self, other): """ Returns an edge set representing the intersection of the two cycles. """ inters = self.edgeset.intersection(other.edgeset) return inters def union(self, other, data=True): """ Returns the edge set corresponding to the union of two cycles. Will overwrite edge/vertex attributes from other to this, so only use if both cycle graphs are the same graph! """ union = self.edgeset.union(other.edgeset) return union def symmetric_difference(self, other, intersection=None): """ Returns a Cycle corresponding to the symmetric difference of the Cycle and other. This is defined as the set of edges which is present in either cycle but not in both. If the intersection has been pre-calculated it can be used. This will fail on non-adjacent loops. """ new_edgeset = list(self.edgeset.symmetric_difference( other.edgeset)) return Cycle(self.graph, new_edgeset) def area(self): """ Returns the area enclosed by the polygon defined by the Cycle. If the cycle contains more than one connected component, this is defined as the area of the largest area connected component minus the areas of the other connected components. """ return self.total_area def radii(self): """ Return the radii of all edges in this cycle. """ return array([self.graph[u][v]['conductivity'] for u, v in self.edgeset]) def __hash__(self): """ Implements hashing by using the internal set description's hash """ return self._nodeset.__hash__() def __eq__(self, other): """ Implements comparison using the internal set description """ if isinstance(other, Cycle): return self._nodeset.__eq__(other._nodeset) elif isinstance(other, frozenset) or isinstance(other, set): return self._nodeset.__eq__(other) else: return -1 def __repr__(self): return repr(self._nodeset) def polygon_area(coords): """ Return the area of a closed polygon """ Xs = coords[:,0] Ys = coords[:,1] # Ignore orientation return 0.5*abs(sum(Xs[:-1]*Ys[1:] - Xs[1:]*Ys[:-1])) def traverse_graph(G, start, nextn): """ Traverses the pruned (i.e. ONLY LOOPS) graph G counter-clockwise in the direction of nextn until start is hit again. If G has treelike components this will fail and get stuck, there is no backtracking. Returns a list of nodes visited, a list of edges visited and an array of node coordinates. 
This will find (a) all internal smallest loops (faces of the planar graph) and (b) one maximal outer loop """ start_coords = array([G.node[start]['x'], G.node[start]['y']]) nodes_visited = [start] nodes_visited_set = set() edges_visited = [] coords = [start_coords] prev = start cur = nextn while cur != start: cur_coords = array([G.node[cur]['x'], G.node[cur]['y']]) # We ignore all neighbors we alreay visited to avoid multiple loops neighs = [n for n in G.neighbors(cur) if n != prev and n != cur] edges_visited.append((prev, cur)) nodes_visited.append(cur) coords.append(cur_coords) n_neighs = len(neighs) if n_neighs > 1: # Choose path that keeps the loop closest on the left hand side prev_coords = array([G.node[prev]['x'], G.node[prev]['y']]) neigh_coords = array([[G.node[n]['x'], G.node[n]['y']] \ for n in neighs]) ## Construct vectors and normalize u = cur_coords - prev_coords vs = neigh_coords - cur_coords # calculate cos and sin between direction vector and neighbors u /= sqrt((u*u).sum(-1)) vs /= sqrt((vs*vs).sum(-1))[...,newaxis] coss = dot(u, vs.T) sins = cross(u, vs) # this is a function between -2 and +2, where the # leftmost path corresponds to -2, rightmost to +2 # sgn(alpha)(cos(alpha) - 1) ranked = sign(sins)*(coss - 1.) prev = cur cur = neighs[argmin(ranked)] else: # No choice to make prev = cur cur = neighs[0] # Remove pathological protruding loops if prev in nodes_visited_set: n_ind = nodes_visited.index(prev) del nodes_visited[n_ind+1:] del coords[n_ind+1:] del edges_visited[n_ind:] nodes_visited_set.add(prev) edges_visited.append((nodes_visited[-1], nodes_visited[0])) return nodes_visited, edges_visited, array(coords) def cycle_mtp_path(cycle): """ Returns a matplotlib Path object describing the cycle. """ # Set up polygon verts = zeros((cycle.coords.shape[0] + 1, cycle.coords.shape[1])) verts[:-1,:] = cycle.coords verts[-1,:] = cycle.coords[0,:] codes = Path.LINETO*ones(verts.shape[0]) codes[0] = Path.MOVETO codes[-1] = Path.CLOSEPOLY return Path(verts, codes) def outer_loop(G, cycles): """ Detects the boundary loop in the set of fundamental cycles by noting that the boundary is precisely the one loop with maximum area (since it contains all other loops, they all must have smaller area) """ return max([(c.area(), c) for c in cycles])[1] def shortest_cycles(G): """ Returns a list of lists of Cycle objects belonging to the fundamental cycles of the pruned (i.e. there are no treelike components) graph G by traversing the graph counter-clockwise for each node until the starting node has been found. Also returns the outer loop. """ cycleset = set() # Betti number counts interior loops, this algorithm finds # exterior loop as well! n_cycles = G.number_of_edges() - G.number_of_nodes() + 1 # Count outer loop as well if n_cycles >= 2: n_cycles += 1 print "Number of cycles including boundary: {}.".format(n_cycles) t0 = time.time() mst = nx.minimum_spanning_tree(G, weight=None) for u, v in G.edges_iter(): if not mst.has_edge(u, v): # traverse cycle in both directions path, edges, coords = traverse_graph(G, u, v) cycleset.add(Cycle(G, edges, coords=coords)) path, edges, coords = traverse_graph(G, v, u) cycleset.add(Cycle(G, edges, coords=coords)) if len(cycleset) != n_cycles: print "WARNING: Found only", len(cycleset), "cycles!!" 
t1 = time.time() print "Detected fundamental cycles in {}s".format(t1 - t0) #print "Number of detected facets:", len(cycleset) return list(cycleset) def find_neighbor_cycles(G, cycles): """ Returns a set of tuples of cycle indices describing which cycles share edges """ n_c = len(cycles) # Construct edge dictionary edges = defaultdict(list) for i in xrange(n_c): for e in cycles[i].edges: edges[tuple(sorted(e))].append(i) # Find all neighboring cycles neighbor_cycles = set() for n in edges.values(): neighbor_cycles.add(tuple(sorted(n))) return neighbor_cycles
mit
5,323,205,184,213,012,000
31.663934
75
0.597574
false
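A quick standalone check of the shoelace-formula helper polygon_area from the record above, applied to a closed unit square (first vertex repeated at the end, as the function expects). The formula is restated here so the snippet runs on its own; the expected area is 1.0.

import numpy as np

def polygon_area(coords):
    # Same shoelace formula as in the record, restated for a standalone check.
    Xs, Ys = coords[:, 0], coords[:, 1]
    return 0.5 * abs(np.sum(Xs[:-1] * Ys[1:] - Xs[1:] * Ys[:-1]))

square = np.array([[0.0, 0.0],
                   [1.0, 0.0],
                   [1.0, 1.0],
                   [0.0, 1.0],
                   [0.0, 0.0]])   # closed polygon: last vertex repeats the first

print(polygon_area(square))       # -> 1.0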
rocco8773/bapsflib
bapsflib/_hdf/maps/msi/interferometerarray.py
1
16541
# This file is part of the bapsflib package, a Python toolkit for the # BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 Erik T. Everson and contributors # # License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full # license terms and contributor agreement. # import h5py import numpy as np from bapsflib.utils.errors import HDFMappingError from warnings import warn from .templates import HDFMapMSITemplate class HDFMapMSIInterferometerArray(HDFMapMSITemplate): """ Mapping class for the 'Interferometer array' MSI diagnostic. Simple group structure looks like: .. code-block:: none +-- Interferometer array | +-- Interferometer [0] | | +-- Interferometer summary list | | +-- Interferometer trace | +-- Interferometer [1] | | +-- Interferometer summary list | | +-- Interferometer trace . . . | +-- Interferometer [6] | | +-- Interferometer summary list | | +-- Interferometer trace """ def __init__(self, group: h5py.Group): """ :param group: the HDF5 MSI diagnostic group :type group: :class:`h5py.Group` """ # initialize HDFMapMSITemplate.__init__(self, group) # populate self.configs self._build_configs() def _build_configs(self): """Builds the :attr:`configs` dictionary.""" # What should be in configs # 1. num. of interferometers # 2. start times for each interferometers # 3. dt for each interferometer # 4. n_bar_L for each interferometer # 5. z location for each interferometer # 6. 'shotnum' field # - contains mapping of HDF5 file quantity to np # a. shape # b. dtype # 7. 'signals' field # - another dict where keys are the fields to be added to # the np.array # 8. 'meta' field # # initialize general info values # - pairs[0:2] are found in the main group's attributes # - pairs[2] corresponds to the sub-group names # - pairs[3:] are found in the main group's attributes (as an # array) and in the sub-group attributes (elements of the # main group's array)...I'm choosing to populate via the # sub-group attributes to ensure one-to-one correspondence # when extracting data with the HDFReadMSI class # pairs = [('n interferometer', 'Interferometer count'), ('calib tag', 'Calibration tag'), ('interferometer name', None), ('t0', 'Start time'), ('dt', 'Timestep'), ('n_bar_L', 'n_bar_L'), ('z', 'z location')] self._configs['interferometer name'] = [] self._configs['t0'] = [] self._configs['dt'] = [] self._configs['n_bar_L'] = [] self._configs['z'] = [] for pair in pairs[0:2]: try: val = self.group.attrs[pair[1]] if isinstance(val, (list, tuple, np.ndarray)): self._configs[pair[0]] = val else: self._configs[pair[0]] = [val] except KeyError: self._configs[pair[0]] = [] warn("Attribute '" + pair[1] + "' not found for MSI diagnostic '" + self.device_name + "', continuing with mapping") # more handling of general info value 'n interferometer' pair = pairs[0] check_n_inter = True if len(self._configs[pair[0]]) != 1: check_n_inter = False warn("Attribute '" + pair[1] + "' for MSI diagnostic '" + self.device_name + "' not an integer, continuing with mapping") elif not isinstance(self._configs[pair[0]][0], (int, np.integer)): check_n_inter = False warn("Attribute '" + pair[1] + "' for MSI diagnostic '" + self.device_name + "' not an integer, continuing with mapping") # initialize 'shape' # - this is used by HDFReadMSI self._configs['shape'] = () # initialize 'shotnum' self._configs['shotnum'] = { 'dset paths': [], 'dset field': ('Shot number',), 'shape': [], 'dtype': np.int32, } # initialize 'signals' # - there is only one signal field named 'signal' self._configs['signals'] = 
{ 'signal': { 'dset paths': [], 'dset field': (), 'shape': [], 'dtype': np.float32, } } # initialize 'meta' self._configs['meta'] = { 'timestamp': { 'dset paths': [], 'dset field': ('Timestamp',), 'shape': [], 'dtype': np.float64 }, 'data valid': { 'dset paths': [], 'dset field': ('Data valid',), 'shape': [], 'dtype': np.int8 }, 'peak density': { 'dset paths': [], 'dset field': ('Peak density',), 'shape': [], 'dtype': np.float32 }, } self._configs['meta']['shape'] = () if not check_n_inter \ else (int(self._configs['n interferometer'][0]),) # populate self.configs from each interferometer group # - all the population is done in this for-loop to ensure all # lists are one-to-one # n_inter_count = 0 sn_size = 0 sig_size = 0 for name in self.group: if isinstance(self.group[name], h5py.Group) \ and 'Interferometer' in name: # count the number of interferometers n_inter_count += 1 # ensure required datasets are present for dset_name in ['Interferometer summary list', 'Interferometer trace']: if dset_name not in self.group[name]: why = ("dataset '" + dset_name + "' not found " + "for 'Interferometer/" + name + "'") raise HDFMappingError(self.info['group path'], why=why) # populate general info values self._configs['interferometer name'].append(name) for pair in pairs[3::]: try: self._configs[pair[0]].append( self.group[name].attrs[pair[1]]) except KeyError: self._configs[pair[0]].append(None) warn("Attribute '" + pair[1] + "' not found for MSI diagnostic '" + self.device_name + '/' + name + "', continuing with mapping") # define values to ensure dataset sizes are consistent # sn_size = number of shot numbers # sig_size = number of samples in interferometer trace # - the way the np.array will be constructed # requires all interferometer signals to # have the same sample size # - define sn_size and ensure it's consistent among all # datasets # - define sig_size and ensure it's consistent among all # datasets # # - Enforcement of the these dimensions is done when # mapping each dataset below # if n_inter_count == 1: # define sn_size dset_name = name + '/Interferometer summary list' dset = self.group[dset_name] if dset.ndim == 1: sn_size = self.group[dset_name].shape[0] else: why = "'/Interferometer summary list' " \ "does not match expected shape" raise HDFMappingError(self.info['group path'], why=why) # define sig_size dset_name = name + '/Interferometer trace' dset = self.group[dset_name] shape = self.group[dset_name].shape if dset.dtype.names is not None: # dataset has fields (it should not have fields) why = "can not handle a 'signal' dataset" \ + "(" + dset_name + ") with fields" raise HDFMappingError(self.info['group path'], why=why) elif dset.ndim == 2: if dset.shape[0] == sn_size: sig_size = shape[1] else: why = "'Interferometer trace' and " \ "'Interferometer summary list' do " \ "not have same number of rows " \ "(shot numbers)" raise HDFMappingError( self.info['group path'], why=why) else: why = "'/Interferometer race' does not" \ " match expected shape" raise HDFMappingError(self.info['group path'], why=why) # define 'shape' self._configs['shape'] = (sn_size,) # -- update configs related to ---- # -- 'Interferometer summary list' ---- # - dependent configs are: # 1. 'shotnum' # 2. 
all of 'meta' # dset_name = name + '/Interferometer summary list' dset = self.group[dset_name] path = dset.name # check 'shape' expected_fields = ['Shot number', 'Timestamp', 'Data valid', 'Peak density'] if dset.shape != (sn_size,): # shape is not consistent among all datasets why = "'/Interferometer summary list' shape " \ "is not consistent across all " \ "interferometers" raise HDFMappingError(self.info['group path'], why=why) elif not all(field in dset.dtype.names for field in expected_fields): # required fields are not present why = "'/Interferometer summary list' does " \ "NOT have required fields" raise HDFMappingError(self.info['group path'], why=why) # update 'shotnum' self._configs['shotnum']['dset paths'].append(path) self._configs['shotnum']['shape'].append( dset.dtype['Shot number'].shape) # update 'meta/timestamp' self._configs['meta']['timestamp'][ 'dset paths'].append(dset.name) self._configs['meta']['timestamp'][ 'shape'].append(dset.dtype['Timestamp'].shape) # update 'meta/data valid' self._configs['meta']['data valid'][ 'dset paths'].append(dset.name) self._configs['meta']['data valid'][ 'shape'].append(dset.dtype['Data valid'].shape) # update 'meta/peak density' self._configs['meta']['peak density'][ 'dset paths'].append(dset.name) self._configs['meta']['peak density'][ 'shape'].append(dset.dtype['Peak density'].shape) # -- update configs related to ---- # -- 'Interferometer trace' ---- # - dependent configs are: # 1. 'signals/signal' # dset_name = name + '/Interferometer trace' dset = self.group[dset_name] # check 'shape' if dset.shape != (sn_size, sig_size): # shape is not consistent among all datasets why = "'/Interferometer trace' shape is" \ "not consistent across all " \ "interferometers" raise HDFMappingError(self.info['group path'], why=why) elif dset.dtype.names is not None: # dataset has fields (it should not have fields) why = "'/Interferometer trace' shape does" \ "not match expected shape " raise HDFMappingError(self.info['group path'], why=why) # update 'signals/signal' values self._configs['signals']['signal'][ 'dset paths'].append(dset.name) # -- Post Populate Checks ---- # check 'shotnum' # 1. convert 'dset paths' from list to tuple # 2. convert 'shape' to a single tuple of shape self._configs['shotnum']['dset paths'] = \ tuple(self._configs['shotnum']['dset paths']) sn_shapes = self._configs['shotnum']['shape'] self._configs['shotnum']['shape'] = sn_shapes[0] # check 'signals' and 'meta' # 1. convert 'dset paths' from list to tuple # 2. 
every dataset has the same 'shape' for subfield in ('signals', 'meta'): subconfigs = self._configs[subfield] for field, config in subconfigs.items(): # define shape if check_n_inter: # 'n interferometer' was found in the HDF5 file shape = (int(self._configs['n interferometer'][0]), sig_size) else: # 'n interferometer' was NOT found, rely on count shape = (n_inter_count, sig_size) # update ['meta']['shape'] if field == 'shape' and subfield == 'meta': self._configs[subfield][field] = (shape[0],) continue # convert ['dset paths'] to tuple self._configs[subfield][field]['dset paths'] = \ tuple(config['dset paths']) # ensure all fields have the same shape if subfield == 'signals': self._configs[subfield][field]['shape'] = shape else: shapes = self._configs[subfield][field]['shape'] if all(shape == shapes[0] for shape in shapes): self._configs[subfield][field]['shape'] = \ shapes[0] else: why = ("dataset shape for field '" + field + "' is not consistent for all " + "interferometers") raise HDFMappingError(self.info['group path'], why=why) # ensure the number of found interferometers is equal to the # diagnostics 'Interferometer count' # if check_n_inter: if n_inter_count != self._configs['n interferometer'][0]: why = 'num. of found interferometers did not ' \ 'match the expected num. of interferometers' raise HDFMappingError(self.info['group path'], why=why)
bsd-3-clause
-927,206,030,974,763,000
41.089059
72
0.451061
false
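A sketch of how the mapping class in the record above might be driven directly from h5py. The file name and the internal group path are assumptions made for illustration, and the configs attribute name is taken to be the template's public accessor for the dictionary built in _build_configs().

import h5py
from bapsflib._hdf.maps.msi.interferometerarray import HDFMapMSIInterferometerArray

# File name and group path are hypothetical.
with h5py.File('example_lapd_run.hdf5', 'r') as f:
    msi_group = f['MSI/Interferometer array']          # group path assumed
    imap = HDFMapMSIInterferometerArray(msi_group)

    # Inspect a few entries of the configs dictionary populated above.
    print(imap.configs['n interferometer'])
    print(imap.configs['shotnum']['dset paths'])
    print(imap.configs['signals']['signal']['shape'])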
caseywstark/void_tools
voboz1.3.2/bin/zobovpostproc.py
1
13114
################# # post-processing routines for zobov # Last modified Jan 28, 2008, by Mark Neyrinck import pylab as M import numpy as N def logpofr_old(r): """ Returns the nat. logarithm of a fit to the probability of finding a zone of density contrast r in a Poisson particle simulation """ # 2D #p = -2.6*(r-1.) # 3D p = -5.12*(r-1) - 0.8*(r-1)**2.8 return p def logpofr(r,whichpofr = '3D'): """ Returns the nat. logarithm of a fit to the probability of finding a zone of density contrast r in a Poisson particle simulation """ if (whichpofr == '3Dfudged'): p = -5.12*(r-1) - 0.8*(r-1)**4.7 elif (whichpofr == '2D'): p = -2.6*(r-1.) elif (whichpofr == '3D'): p = -5.12*(r-1) - 0.8*(r-1)**2.8 else: print 'Unrecognized whichpofr in logpofr:',whichpofr p = 0. return p def mostProbableVoidExtents(prefix, extraoutsuffix = '', densthresh = 0.,whichpofr='3D',plotlabel='',ls='',justdovoids=[], plotcurves=False): """ Truncates large voids at their most probable level. Outputs a plot showing the curve of probabilities at each zone-accretion event. If the program's thinking about joining zones 1 and 2 together, for instance, it compares P(1)P(2) to P(1+2), i.e. the probability that both zones 1 and 2 arose from Poisson noise to the probability that their union arose from Poisson noise. densthresh = an optional parameter keeping links between zones below a certain density, e.g. 0.2. This can still be necessary to halt the growth of the largest void, since the ratio of max to min densities can be quite large. Outputs: .mpve.void returns an ASCII zone list for each output void. .mpve.txt returns a file in the same format as .txt (w/o header) """ if (densthresh == 0.): densthresh = 1e30 voidfile = prefix+'.void' txtfile = prefix+'.txt' mpvevoidfile = prefix+extraoutsuffix+'.mpve.void' mpvetxtfile = prefix+extraoutsuffix+'.mpve.txt' Fvoid = open(voidfile,'r') nvoids = int(Fvoid.readline()) print nvoids,'voids (including possible crazy border voids).' Fvoid.close() voidsread = M.load(txtfile,skiprows=2) nvoids_txt = len(voidsread[:,0]) vid = N.zeros(nvoids,dtype='int') # void id for v in range(nvoids_txt): vid[voidsread[v,1]] = v lodens_init = voidsread[vid,3] vol_init = voidsread[vid,4] np_init = voidsread[vid,5] r_init = voidsread[vid,9] # Read the .void file Fvoid = open(voidfile,'r') nvoids = int(Fvoid.readline()) print nvoids,' voids.' 
colors = ['b','g','r','c','m','y','k'] mostprobadd = N.zeros(nvoids,dtype='int') r_mpve = 1.*r_init numcurvesplotted = 0 big_rlist = [] big_denslist = [] big_zonestoaddlist = [] big_totalzonelist = [] # "big" lists over all voids/zones naddsarray = N.zeros(nvoids,dtype=int) # read the file for v in range(nvoids): col = colors[v % 7] voidnums = (Fvoid.readline()).split() pos = 1 numzonestoadd, r = int(voidnums[pos]), float(voidnums[pos+1]) denslist = [lodens_init[v]] rlist = [r] zonestoaddlist = [] # we're not including [v] in the zones to add totalzonelist = [v] while (numzonestoadd > 0): zonestoadd = map(int, voidnums[pos+2:pos+2+numzonestoadd]) rnext = float(voidnums[pos+numzonestoadd+3]) dens = lodens_init[v]*r if (dens < densthresh): rlist.append(rnext) denslist.append(dens) zonestoaddlist.append(zonestoadd) # not extend, since we want a list of lists totalzonelist.extend(zonestoadd) # for this one, we just lump all the zones together pos += numzonestoadd+2 numzonestoadd, r = int(voidnums[pos]), float(voidnums[pos+1]) #when we write it, r will be the next r big_rlist.append(rlist) big_denslist.append(denslist) big_zonestoaddlist.append(zonestoaddlist) big_totalzonelist.append(totalzonelist) naddsarray[v] = len(denslist)-1 Fvoid.close() if len(justdovoids) == 0: justdovoids = range(nvoids) for v in justdovoids: denslist = big_denslist[v] if ((naddsarray[v] > 0)*(denslist[0] < densthresh)): col = colors[v % 7] rlist = big_rlist[v] zonestoaddlist = 1*big_zonestoaddlist[v] score = logpofr(rlist[0],whichpofr=whichpofr) scorelist = [score] for adds in range(naddsarray[v]): dens = denslist[adds+1] # This is the linking density for add "adds." if (dens < densthresh): zonestoadd = zonestoaddlist[adds] # see if a subset of zonestoadd exists as a whole void v1; # if so, we don't want to include v1's subvoids' probabilities for v1 in range(nvoids): # There might be a more elegant way to do this. # We see if listtocompare is entirely in zonestoadd; # if so, we take out all but v1 (the first element in zonestoadd. listtocompare = big_totalzonelist[v1] lenlisttocompare = len(listtocompare) if (lenlisttocompare > 1): #otherwise, we might as well leave it in zonesincommon = sum(int(z in zonestoadd) for z in listtocompare) if (zonesincommon == lenlisttocompare): #remove listtocompare from zonestoadd, except for first elt. 
for i in range(1,len(listtocompare)): zonestoadd.remove(listtocompare[i]) score += logpofr(rlist[adds+1],whichpofr=whichpofr) - logpofr(rlist[adds],whichpofr=whichpofr) print score, rlist[adds+1],rlist[adds],zonestoadd for z in zonestoadd: score -= logpofr(r_init[z],whichpofr=whichpofr) print score scorelist.append(score) scorearray = M.array(scorelist) densarray = M.array(denslist) #rnextarray = M.array(rnextlist) mostprobadd[v] = N.where(scorearray == min(scorearray))[0] r_mpve[v] = rlist[mostprobadd[v]] print v,': most probadd:',mostprobadd[v] print ls if (plotcurves): M.plot(scorearray,ls,label=plotlabel,linewidth=2) numcurvesplotted += 1 if (numcurvesplotted > 0): M.show() # Do it again, but save the most-probable void extents; output Fvoid = open(voidfile,'r') nvoids = int(Fvoid.readline()) Fmpvevoid = open(mpvevoidfile,'w') vol_mpve = 1.*vol_init np_mpve = 1*np_init nz_mpve = 0*np_init Fmpvevoid.write(str(nvoids)+'\n') for v in range(nvoids): zonestoaddlist = 1*big_zonestoaddlist[v] zonelist = [v] for adds in range(mostprobadd[v]): zonestoadd = zonestoaddlist[adds] zonelist.extend(zonestoadd) vol_mpve[v] += M.sum(vol_init[M.array(zonestoadd)]) np_mpve[v] += M.sum(np_init[M.array(zonestoadd)]) nz_mpve[v] = len(zonelist) for z in zonelist: Fmpvevoid.write(str(z)+' ') Fmpvevoid.write('\n') Fvoid.close() Fmpvevoid.close() #replace the old array for rewriting voidsread[vid,6] = 1*nz_mpve voidsread[vid,7] = 1.*vol_mpve voidsread[vid,8] = 1*np_mpve voidsread[vid,9] = 1.*r_mpve voidsread[vid,10] = M.exp(logpofr(r_mpve,whichpofr=whichpofr)) index = (-voidsread[:,9]).argsort() #read x,y,z nvout = 1 Fmpvetxt = open(mpvetxtfile,'w') for i in index: # index is the output of the sort by prob. threshold Fmpvetxt.writelines(str(nvout)+' '+str(int(voidsread[i,1]))+' '+\ str(int(voidsread[i,2]))+' '+str(voidsread[i,3])+' '+\ str(voidsread[i,4])+' '+str(int(voidsread[i,5]))+' '+\ str(int(voidsread[i,6]))+' '+str(voidsread[i,7])+' '+\ str(int(voidsread[i,8]))+' '+str(voidsread[i,9])+' '+\ str(voidsread[i,10])+'\n') nvout += 1 Fmpvetxt.close() def useRThreshold(prefix, extraoutsuffix = '', densthresh = 0., rthresh = 1.,whichpofr='3D'): """ In this method, you set a density-contrast threshold rthresh. Voids not exceeding that threshold are not mentioned in the output file. Voids stop accreting zones if they encounter another zone with r>rthresh. densthresh is an optional parameter below which link densities are constrained. Outputs: .rthresh.%f.void returns an ASCII zone list for each output void, w/ threshold %f. .mpve.%f.txt returns a file in the same format as .txt (w/o header) """ if (densthresh == 0.) : densthresh = 1e30 voidfile = prefix+'.void' txtfile = prefix+'.txt' rthreshvoidfile = prefix+extraoutsuffix+'.rthresh.'+str(rthresh)+'.void' rthreshtxtfile = prefix+extraoutsuffix+'.rthresh.'+str(rthresh)+'.txt' Fvoid = open(voidfile,'r') nvoids = int(Fvoid.readline()) print nvoids,'voids (including possible crazy border voids).' 
Fvoid.close() voidsread = M.load(txtfile,skiprows=2) nvoids_txt = len(voidsread[:,0]) vid = N.zeros(nvoids,dtype='int') # void id for v in range(nvoids_txt): vid[voidsread[v,1]] = v lodens_init = voidsread[vid,3] vol_init = voidsread[vid,4] np_init = voidsread[vid,5] r_init = voidsread[vid,9] # Read the .void file r_rthresh = 0.*r_init vol_rthresh = 1.*vol_init np_rthresh = 1*np_init nz_rthresh = 0*np_init Fvoid = open(voidfile,'r') nvoids = int(Fvoid.readline()) Frthreshvoid = open(rthreshvoidfile,'w') numpassingrthresh = len(N.where(r_init > rthresh)[0]) print nvoids,' voids,', numpassingrthresh, ' exceed rthresh.' Frthreshvoid.write(str(numpassingrthresh)+'\n') for v in range(nvoids): voidnums = (Fvoid.readline()).split() pos = 1 numzonestoadd, r = int(voidnums[pos]), float(voidnums[pos+1]) nadds = 0 if (r_init[v] > rthresh): zonelist = [v] r_rthresh[v] = r_init[v] #set it to its original value else: zonelist = [] while (numzonestoadd > 0): zonestoadd = M.array(voidnums[pos+2:pos+2+numzonestoadd]).astype(int) dens = lodens_init[v]*r rnext = float(voidnums[pos+numzonestoadd+3]) if (r_rthresh[v] == r_init[v]): # If it's eligible for growing, butwe haven't already quit adding zones if ((max(r_init[zonestoadd]) > rthresh) or (dens > densthresh)): r_rthresh[v] = r print 'r of ',max(r_init[zonestoadd]),' found; stopping accretion' else: zonelist.extend(zonestoadd) vol_rthresh[v] += M.sum(vol_init[M.array(zonestoadd)]) np_rthresh[v] += M.sum(np_init[M.array(zonestoadd)]) pos += numzonestoadd+2 numzonestoadd, r = int(voidnums[pos]), float(voidnums[pos+1]) if (r_init[v] >= rthresh): for z in zonelist: Frthreshvoid.write(str(z)+' ') Frthreshvoid.write('\n') nz_rthresh[v] = len(zonelist) print zonelist Fvoid.close() Frthreshvoid.close() #replace the old array for rewriting voidsread[vid,6] = 1*nz_rthresh voidsread[vid,7] = 1.*vol_rthresh voidsread[vid,8] = 1*np_rthresh voidsread[vid,9] = 1.*r_rthresh voidsread[vid,10] = M.exp(logpofr(r_rthresh,whichpofr=whichpofr)) index = (-voidsread[:,9]).argsort() nvout = 1 Frthreshtxt = open(rthreshtxtfile,'w') for i in index: # index is the output of the sort by prob. threshold if (r_rthresh[int(voidsread[i,1])] > 0.): Frthreshtxt.writelines(str(nvout)+' '+str(int(voidsread[i,1]))+' '+\ str(int(voidsread[i,2]))+' '+str(voidsread[i,3])+' '+\ str(voidsread[i,4])+' '+str(int(voidsread[i,5]))+' '+\ str(int(voidsread[i,6]))+' '+str(voidsread[i,7])+' '+\ str(int(voidsread[i,8]))+' '+str(voidsread[i,9])+' '+\ str(voidsread[i,10])+'\n') nvout += 1 Frthreshtxt.close()
mit
-7,751,480,320,609,637,000
36.255682
141
0.557877
false
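A small numeric check of the '3D' density-contrast probability fit used throughout the record above. The formula is restated inline so it runs under Python 3 on its own; by construction P(r) equals 1 at r = 1 and falls off steeply for larger contrasts.

import numpy as np

r = np.array([1.0, 1.5, 2.0])                     # zone density contrasts
logp = -5.12 * (r - 1) - 0.8 * (r - 1) ** 2.8     # the '3D' fit from logpofr
print(np.exp(logp))                               # approx. [1.0, 0.069, 0.0027]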
kevin-intel/scikit-learn
maint_tools/check_pxd_in_installation.py
17
1962
"""Utility for testing presence and usability of .pxd files in the installation Usage: ------ python check_pxd_in_installation.py path/to/install_dir/of/scikit-learn """ import os import sys import pathlib import tempfile import textwrap import subprocess sklearn_dir = pathlib.Path(sys.argv[1]) pxd_files = list(sklearn_dir.glob("**/*.pxd")) print("> Found pxd files:") for pxd_file in pxd_files: print(' -', pxd_file) print("\n> Trying to compile a cython extension cimporting all corresponding " "modules\n") with tempfile.TemporaryDirectory() as tmpdir: tmpdir = pathlib.Path(tmpdir) # A cython test file which cimports all modules corresponding to found # pxd files. # e.g. sklearn/tree/_utils.pxd becomes `cimport sklearn.tree._utils` with open(tmpdir / 'tst.pyx', 'w') as f: for pxd_file in pxd_files: to_import = str(pxd_file.relative_to(sklearn_dir)) to_import = to_import.replace(os.path.sep, '.') to_import = to_import.replace('.pxd', '') f.write('cimport sklearn.' + to_import + '\n') # A basic setup file to build the test file. # We set the language to c++ and we use numpy.get_include() because # some modules require it. with open(tmpdir / 'setup_tst.py', 'w') as f: f.write(textwrap.dedent( """ from distutils.core import setup from distutils.extension import Extension from Cython.Build import cythonize import numpy extensions = [Extension("tst", sources=["tst.pyx"], language="c++", include_dirs=[numpy.get_include()])] setup(ext_modules=cythonize(extensions)) """)) subprocess.run(["python", "setup_tst.py", "build_ext", "-i"], check=True, cwd=tmpdir) print("\n> Compilation succeeded !")
bsd-3-clause
-6,292,823,459,958,329,000
32.254237
79
0.59633
false
zhreshold/mxnet
tests/python/train/test_bucketing.py
1
4422
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: skip-file import numpy as np import mxnet as mx import random from random import randint from mxnet.contrib.amp import amp import pytest def prepare_bucketing_data(buckets, len_vocab, batch_size, invalid_label, num_sentence): train_sent = [] val_sent = [] for _ in range(num_sentence): len_sentence = randint(6, max(buckets)-1) # leave out the two last buckets empty train_sentence = [] val_sentence = [] for _ in range(len_sentence): train_sentence.append(randint(1, len_vocab)) val_sentence.append(randint(1, len_vocab)) train_sent.append(train_sentence) val_sent.append(val_sentence) data_train = mx.rnn.BucketSentenceIter(train_sent, batch_size, buckets=buckets, invalid_label=invalid_label) data_val = mx.rnn.BucketSentenceIter(val_sent, batch_size, buckets=buckets, invalid_label=invalid_label) return (data_train, data_val) def train_model(context=mx.cpu()): import logging head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.DEBUG, format=head) console = logging.StreamHandler() console.setLevel(logging.DEBUG) logging.getLogger('').addHandler(console) batch_size = 128 num_epochs = 5 num_hidden = 25 num_embed = 25 num_layers = 2 len_vocab = 50 buckets = [5, 10, 20, 30, 40] invalid_label = -1 num_sentence = 1000 data_train, data_val = prepare_bucketing_data(buckets, len_vocab, batch_size, invalid_label, num_sentence) stack = mx.rnn.SequentialRNNCell() for i in range(num_layers): stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i)) def sym_gen(seq_len): data = mx.sym.Variable('data') label = mx.sym.Variable('softmax_label') embed = mx.sym.Embedding(data=data, input_dim=len_vocab, output_dim=num_embed, name='embed') stack.reset() outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True) pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden)) pred = mx.sym.FullyConnected(data=pred, num_hidden=len_vocab, name='pred') label = mx.sym.Reshape(label, shape=(-1,)) loss = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax') return loss, ('data',), ('softmax_label',) contexts = context model = mx.mod.BucketingModule( sym_gen=sym_gen, default_bucket_key=data_train.default_bucket_key, context=contexts) logging.info('Begin fit...') model.fit( train_data=data_train, eval_data=data_val, eval_metric=mx.gluon.metric.Perplexity(invalid_label), # Use Perplexity for multiclass classification. 
kvstore='device', optimizer='sgd', optimizer_params={'learning_rate': 0.01, 'momentum': 0, 'wd': 0.00001}, initializer=mx.init.Xavier(factor_type="in", magnitude=2.34), num_epoch=num_epochs, batch_end_callback=mx.callback.Speedometer(batch_size, 50)) logging.info('Finished fit...') return model @pytest.mark.garbage_expected def test_bucket_module(): # This test forecasts random sequence of words to check bucketing. # We cannot guarantee the accuracy of such an impossible task, and comments out the following line. # assert model.score(data_val, mx.gluon.metric.MSE())[0][1] < 350, "High mean square error." model = train_model() if __name__ == "__main__": test_bucket_module()
apache-2.0
8,211,663,231,853,985,000
34.376
110
0.652646
false
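A toy illustration of the bucketing step that prepare_bucketing_data in the record above relies on: BucketSentenceIter pads each integer-encoded sentence up to the smallest bucket length that fits it, filling with invalid_label. The two example sentences are made up.

import mxnet as mx

sentences = [[3, 7, 2], [5, 1, 4, 9, 8, 2, 6]]        # toy integer-encoded data
data_iter = mx.rnn.BucketSentenceIter(sentences, 1,
                                      buckets=[5, 10],
                                      invalid_label=-1)
for batch in data_iter:
    # bucket_key is the padded sequence length chosen for this batch
    print(batch.bucket_key, batch.data[0].shape)      # e.g. 5 (1, 5) and 10 (1, 10)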
bchretien/Python-sandbox
src/poly_surface_extrema.py
1
4053
#!/usr/bin/env python """ @author: Benjamin Chretien """ import math import numpy as np from mayavi import mlab j = complex(0,1) min_x = -10. max_x = 10. min_y = -8. max_y = 8. root0 = 1. lamda = 0.01/abs(max_x) step_size = 0.1 def f_evolution_element(x, y): root_real = 2. roots = np.zeros((3,3)) if y < 0: dP = np.poly([root0, root_real + y * j, root_real - y * j]) elif y > 0: dP = np.poly([root0, root_real+y, root_real-y]) else: dP = np.poly([root0, root_real, -root_real]) P = lamda*np.polyint(dP) cplx_roots = np.roots(dP) roots[:,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x] roots[:,0] = np.sort(roots[:,0]) z = np.polyval(P, x) for i in xrange(roots.shape[0]): roots[i,1] = y roots[i,2] = np.polyval(P, roots[i,0]) return z,roots def f_evolution(x, y): z = np.zeros((x.size, y.size)) root_real = 2. roots = np.zeros((3,y.size,3)) for k in xrange(y.size): if y[k] < 0: dP = np.poly([root0, root_real + y[k] * j, root_real - y[k] * j]) elif y[k] > 0: dP = np.poly([root0, root_real + y[k], root_real-y[k]]) else: dP = np.poly([root0, root_real, -root_real]) P = lamda*np.polyint(dP) cplx_roots = np.roots(dP) roots[:,k,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x] roots[:,k,0] = np.sort(roots[:,k,0]) for i in xrange(x.size): z[i,k] = np.polyval(P, x[i]) for i in xrange(roots.shape[0]): roots[i,k,1] = y[k] roots[i,k,2] = np.polyval(P, roots[i,k,0]) return z,roots # Grid X = np.arange(min_x, max_x + step_size, step_size) Y = np.arange(min_y, max_y + step_size, step_size) # Compute data Z_evol,roots_evol = f_evolution(X,Y) fig = mlab.figure('Complex roots', bgcolor=(0, 0, 0), size=(800, 600)) # Clamp colors to get a better gradient near the minimum vmin_1 = np.min(Z_evol[:,0:10]) vmax_1 = vmin_1 + 0.02*(np.max(Z_evol[:,0:10]) - vmin_1) # Create the surface s_poly = mlab.surf(X[:],Y[:],Z_evol[:,:], colormap='jet', representation='surface', vmin = vmin_1, vmax = vmax_1, figure=fig) # Real root x = roots_evol[0,0:math.floor(len(Y)/2)+1,0].flatten(0) y = roots_evol[0,0:math.floor(len(Y)/2)+1,1].flatten(0) z = roots_evol[0,0:math.floor(len(Y)/2)+1,2].flatten(0) trajectory1 = mlab.plot3d(x[:], y[:], z[:], color=(1,0,0), tube_radius=None) # Real part of conjugate root x = roots_evol[2,0:math.floor(len(Y)/2)+1,0].flatten(0) y = roots_evol[2,0:math.floor(len(Y)/2)+1,1].flatten(0) z = roots_evol[2,0:math.floor(len(Y)/2)+1,2].flatten(0) trajectory2 = mlab.plot3d(x[:], y[:], z[:], color=(1,1,0), tube_radius=None) # Real root x = roots_evol[2,math.floor(len(Y)/2):-1,0].flatten(0) y = roots_evol[2,math.floor(len(Y)/2):-1,1].flatten(0) z = roots_evol[2,math.floor(len(Y)/2):-1,2].flatten(0) trajectory3 = mlab.plot3d(x[:], y[:], z[:], color=(1,1,0), tube_radius=None) # Real root x = roots_evol[0,math.floor(len(Y)/2):-1,0].flatten(0) y = roots_evol[0,math.floor(len(Y)/2):-1,1].flatten(0) z = roots_evol[0,math.floor(len(Y)/2):-1,2].flatten(0) trajectory4 = mlab.plot3d(x[:], y[:], z[:], color=(1,0,0), tube_radius=None) # Real root x = roots_evol[1,math.floor(len(Y)/2):-1,0].flatten(0) y = roots_evol[1,math.floor(len(Y)/2):-1,1].flatten(0) z = roots_evol[1,math.floor(len(Y)/2):-1,2].flatten(0) trajectory5 = mlab.plot3d(x[:], y[:], z[:], color=(1,1,1), tube_radius=None) # Separation y = 0 x = X y = [0 for _ in xrange(len(x))] z = Z_evol[:,len(Y)/2] trajectory6 = mlab.plot3d(x[:-2], y[:-2], z[:-2], color=(1,1,1), tube_radius=None, opacity=0.5) # Create the axes mlab.axes(s_poly, color=(.7, .7, .7), xlabel='x', ylabel='y < 0: Imag(conj_root)\ny > 0: +/- real root', 
zlabel='P(x)') # Activate antialiasing #fig.scene.render_window.aa_frames = 8 # Show the result mlab.show()
bsd-2-clause
-8,746,750,877,855,058,000
29.704545
91
0.561559
false
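A brief sketch of the numpy polynomial calls the record above is built on: np.poly constructs a polynomial from prescribed roots, np.polyint integrates it, and the roots of the derivative mark the extrema of the integrated polynomial. The root values below are arbitrary.

import numpy as np

dP = np.poly([1.0, 2.0, 3.0])        # cubic with roots at x = 1, 2, 3 (the derivative)
P = np.polyint(dP)                    # quartic antiderivative; its extrema sit at those roots
print(np.roots(dP))                   # recovers [3. 2. 1.]
print(np.polyval(P, np.roots(dP)))    # values of P at its extrema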
gfyoung/pandas
pandas/tests/reshape/concat/test_series.py
2
4950
import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, Series, concat, date_range, ) import pandas._testing as tm @pytest.fixture(params=[True, False]) def sort(request): """Boolean sort keyword for concat and DataFrame.append.""" return request.param class TestSeriesConcat: def test_concat_series(self): ts = tm.makeTimeSeries() ts.name = "foo" pieces = [ts[:5], ts[5:15], ts[15:]] result = concat(pieces) tm.assert_series_equal(result, ts) assert result.name == ts.name result = concat(pieces, keys=[0, 1, 2]) expected = ts.copy() ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]")) exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))] exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes) expected.index = exp_index tm.assert_series_equal(result, expected) def test_concat_empty_and_non_empty_series_regression(self): # GH 18187 regression test s1 = Series([1]) s2 = Series([], dtype=object) expected = s1 result = pd.concat([s1, s2]) tm.assert_series_equal(result, expected) def test_concat_series_axis1(self, sort=sort): ts = tm.makeTimeSeries() pieces = [ts[:-2], ts[2:], ts[2:-2]] result = concat(pieces, axis=1) expected = DataFrame(pieces).T tm.assert_frame_equal(result, expected) result = concat(pieces, keys=["A", "B", "C"], axis=1) expected = DataFrame(pieces, index=["A", "B", "C"]).T tm.assert_frame_equal(result, expected) # preserve series names, #2489 s = Series(np.random.randn(5), name="A") s2 = Series(np.random.randn(5), name="B") result = concat([s, s2], axis=1) expected = DataFrame({"A": s, "B": s2}) tm.assert_frame_equal(result, expected) s2.name = None result = concat([s, s2], axis=1) tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object")) # must reindex, #2603 s = Series(np.random.randn(3), index=["c", "a", "b"], name="A") s2 = Series(np.random.randn(4), index=["d", "a", "b", "c"], name="B") result = concat([s, s2], axis=1, sort=sort) expected = DataFrame({"A": s, "B": s2}) tm.assert_frame_equal(result, expected) def test_concat_series_axis1_names_applied(self): # ensure names argument is not ignored on axis=1, #23490 s = Series([1, 2, 3]) s2 = Series([4, 5, 6]) result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"]) expected = DataFrame( [[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A") ) tm.assert_frame_equal(result, expected) result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"]) expected = DataFrame( [[1, 4], [2, 5], [3, 6]], columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]), ) tm.assert_frame_equal(result, expected) def test_concat_series_axis1_same_names_ignore_index(self): dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1] s1 = Series(np.random.randn(len(dates)), index=dates, name="value") s2 = Series(np.random.randn(len(dates)), index=dates, name="value") result = concat([s1, s2], axis=1, ignore_index=True) expected = Index(range(2)) tm.assert_index_equal(result.columns, expected, exact=True) @pytest.mark.parametrize( "s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))] ) def test_concat_series_name_npscalar_tuple(self, s1name, s2name): # GH21015 s1 = Series({"a": 1, "b": 2}, name=s1name) s2 = Series({"c": 5, "d": 6}, name=s2name) result = pd.concat([s1, s2]) expected = Series({"a": 1, "b": 2, "c": 5, "d": 6}) tm.assert_series_equal(result, expected) def test_concat_series_partial_columns_names(self): # GH10698 foo = Series([1, 2], name="foo") bar = 
Series([1, 2]) baz = Series([4, 5]) result = concat([foo, bar, baz], axis=1) expected = DataFrame( {"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1] ) tm.assert_frame_equal(result, expected) result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"]) expected = DataFrame( {"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]}, columns=["red", "blue", "yellow"], ) tm.assert_frame_equal(result, expected) result = concat([foo, bar, baz], axis=1, ignore_index=True) expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]}) tm.assert_frame_equal(result, expected)
bsd-3-clause
636,883,753,715,391,700
33.137931
88
0.546667
false
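The test file in the record above pins down how pandas' concat treats Series names along axis=1 (names become column labels, unnamed Series fall back to integer labels, and keys override both). A minimal standalone sketch of that behaviour, with made-up data, assuming only that pandas is installed:

import pandas as pd

# Named Series: concat along axis=1 uses the names as column labels.
s1 = pd.Series([1, 2], name="foo")
s2 = pd.Series([3, 4], name="bar")
print(pd.concat([s1, s2], axis=1).columns.tolist())                  # ['foo', 'bar']

# An unnamed Series falls back to a positional integer label.
s3 = pd.Series([5, 6])
print(pd.concat([s1, s3], axis=1).columns.tolist())                  # ['foo', 0]

# keys= overrides the Series names entirely.
print(pd.concat([s1, s2], axis=1, keys=["a", "b"]).columns.tolist()) # ['a', 'b']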
DOV-Vlaanderen/pydov
tests/test_util_query.py
1
12214
"""Module grouping tests for the pydov.util.query module.""" from itertools import permutations import numpy as np import pandas as pd import pytest from owslib.etree import etree from pydov.util.dovutil import build_dov_url from pydov.util.query import Join, PropertyInList from tests.abstract import clean_xml class TestPropertyInList(object): """Test the PropertyInList query expression.""" def test(self): """Test the PropertyInList expression with a standard list. Test whether the generated query is correct. """ l = ['a', 'b', 'c'] query = PropertyInList('methode', l) xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 3 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'methode' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0 def test_stable(self): """Test the PropertyInList expression with a standard list. Test whether the generated query is correct and stable. """ l = ['a', 'b', 'c'] for p in permutations(l): query = PropertyInList('methode', list(p)) xml = query.toXML() assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml( '<ogc:Or><ogc:PropertyIsEqualTo><ogc:PropertyName>methode</ogc' ':PropertyName><ogc:Literal>a</ogc:Literal></ogc' ':PropertyIsEqualTo><ogc:PropertyIsEqualTo><ogc:PropertyName' '>methode</ogc:PropertyName><ogc:Literal>b</ogc:Literal></ogc' ':PropertyIsEqualTo><ogc:PropertyIsEqualTo><ogc:PropertyName' '>methode</ogc:PropertyName><ogc:Literal>c</ogc:Literal></ogc' ':PropertyIsEqualTo></ogc:Or>') def test_duplicate(self): """Test the PropertyInList expression with a list containing duplicates. Test whether the generated query is correct and does not contain the duplicate entry twice. """ l = ['a', 'a', 'b', 'c'] l_output = ['a', 'b', 'c'] query = PropertyInList('methode', l) xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 3 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'methode' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l_output.remove(literal.text) assert len(l_output) == 0 def test_list_single(self): """Test the PropertyInList expression with a list containing a single item. Test whether the generated query is correct and does contain only a single PropertyIsEqualTo. """ l = ['a'] query = PropertyInList('methode', l) xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'methode' literal = xml.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0 def test_list_single_duplicate(self): """Test the PropertyInList expression with a list containing a single duplicated item. Test whether the generated query is correct and does contain only a single PropertyIsEqualTo. 
""" l = ['a', 'a'] l_output = ['a'] query = PropertyInList('methode', l) xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'methode' literal = xml.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l_output l_output.remove(literal.text) assert len(l_output) == 0 def test_emptylist(self): """Test the PropertyInList expression with an empty list. Test whether a ValueError is raised. """ with pytest.raises(ValueError): l = [] PropertyInList('methode', l) def test_nolist(self): """Test the PropertyInList expression with a string instead of a list. Test whether a ValueError is raised. """ with pytest.raises(ValueError): l = 'goed' PropertyInList('betrouwbaarheid', l) class TestJoin(object): """Test the Join query expression.""" def test(self): """Test the Join expression with a standard dataframe. Test whether the generated query is correct. """ l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068843'), build_dov_url('data/boring/1980-068861')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20, 30]) }) query = Join(df, 'pkey_boring') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 3 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0 def test_duplicate(self): """Test the Join expression with a column containing duplicates. Test whether the generated query is correct and does not contain the duplicate entry twice. """ l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1980-068861')] l_output = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1980-068861')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20, 30]) }) query = Join(df, 'pkey_boring') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 2 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l_output.remove(literal.text) assert len(l_output) == 0 def test_wrongcolumn(self): """Test the Join expression with a join_column not available in the dataframe. Test whether a ValueError is raised. """ with pytest.raises(ValueError): l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068843'), build_dov_url('data/boring/1980-068861')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20, 30]) }) Join(df, 'pkey_sondering') def test_single(self): """Test the Join expression with a dataframe containing a single row. Test whether the generated query is correct and does contain only a single PropertyIsEqualTo. 
""" l = [build_dov_url('data/boring/1986-068853')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10]) }) query = Join(df, 'pkey_boring') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = xml.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0 def test_single_duplicate(self): """Test the Join expression with a dataframe containing two identical keys. Test whether the generated query is correct and does contain only a single PropertyIsEqualTo. """ l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068853')] l_output = [build_dov_url('data/boring/1986-068853')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20]) }) query = Join(df, 'pkey_boring') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = xml.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l_output l_output.remove(literal.text) assert len(l_output) == 0 def test_empty(self): """Test the Join expression with an empty dataframe. Test whether a ValueError is raised """ df = pd.DataFrame({ 'pkey_boring': [np.nan, np.nan], 'diepte_tot_m': pd.Series([10, 20]) }) with pytest.raises(ValueError): Join(df, 'pkey_boring') def test_on(self): """Test the Join expression with a standard dataframe and 'on'. Test whether the generated query is correct. """ l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068843'), build_dov_url('data/boring/1980-068861')] df = pd.DataFrame({ 'pkey_boring': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20, 30]) }) query = Join(df, on='pkey_boring') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 3 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0 def test_using(self): """Test the Join expression with a standard dataframe and 'on' and 'using'. Test whether the generated query is correct. """ l = [build_dov_url('data/boring/1986-068853'), build_dov_url('data/boring/1986-068843'), build_dov_url('data/boring/1980-068861')] df = pd.DataFrame({ 'boringfiche': pd.Series(l), 'diepte_tot_m': pd.Series([10, 20, 30]) }) query = Join(df, on='pkey_boring', using='boringfiche') xml = query.toXML() assert xml.tag == '{http://www.opengis.net/ogc}Or' assert len(list(xml)) == 3 for f in xml: assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo' propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName') assert propertyname.text == 'pkey_boring' literal = f.find('./{http://www.opengis.net/ogc}Literal') assert literal.text in l l.remove(literal.text) assert len(l) == 0
mit
6,981,466,500,609,612,000
29.688442
79
0.570657
false
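The assertions in the record above fix the shape of the OGC filter that PropertyInList and Join are expected to emit: an ogc:Or of ogc:PropertyIsEqualTo elements, with duplicates removed, collapsing to a single comparison when only one value remains. A rough standalone sketch of that XML shape using only the standard library (this is not pydov's implementation, just an illustration of the structure the tests assert):

import xml.etree.ElementTree as ET

OGC = 'http://www.opengis.net/ogc'


def property_in_list(propertyname, values):
    # Deduplicate, then build Or(PropertyIsEqualTo, ...) or a single PropertyIsEqualTo.
    values = sorted(set(values))
    if len(values) == 0:
        raise ValueError('list of values should not be empty')

    def equal_to(value):
        eq = ET.Element('{%s}PropertyIsEqualTo' % OGC)
        ET.SubElement(eq, '{%s}PropertyName' % OGC).text = propertyname
        ET.SubElement(eq, '{%s}Literal' % OGC).text = value
        return eq

    if len(values) == 1:
        return equal_to(values[0])
    or_element = ET.Element('{%s}Or' % OGC)
    for value in values:
        or_element.append(equal_to(value))
    return or_element


print(ET.tostring(property_in_list('methode', ['a', 'b', 'a'])).decode())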
tarunlnmiit/CommPy
commpy/utilities.py
1
3297
# Authors: Veeresh Taranalli <veeresht@gmail.com> # License: BSD 3-Clause """ ============================================ Utilities (:mod:`commpy.utilities`) ============================================ .. autosummary:: :toctree: generated/ dec2bitarray -- Integer to binary (bit array). bitarray2dec -- Binary (bit array) to integer. hamming_dist -- Hamming distance. euclid_dist -- Squared Euclidean distance. upsample -- Upsample by an integral factor (zero insertion). """ import numpy as np __all__ = ['dec2bitarray', 'bitarray2dec', 'hamming_dist', 'euclid_dist', 'upsample'] def dec2bitarray(in_number, bit_width): """ Converts a positive integer to NumPy array of the specified size containing bits (0 and 1). Parameters ---------- in_number : int Positive integer to be converted to a bit array. bit_width : int Size of the output bit array. Returns ------- bitarray : 1D ndarray of ints Array containing the binary representation of the input decimal. """ binary_string = bin(in_number) length = len(binary_string) bitarray = np.zeros(bit_width, 'int') for i in xrange(length-2): bitarray[bit_width-i-1] = int(binary_string[length-i-1]) return bitarray def bitarray2dec(in_bitarray): """ Converts an input NumPy array of bits (0 and 1) to a decimal integer. Parameters ---------- in_bitarray : 1D ndarray of ints Input NumPy array of bits. Returns ------- number : int Integer representation of input bit array. """ number = 0 for i in xrange(len(in_bitarray)): number = number + in_bitarray[i]*pow(2, len(in_bitarray)-1-i) return number def hamming_dist(in_bitarray_1, in_bitarray_2): """ Computes the Hamming distance between two NumPy arrays of bits (0 and 1). Parameters ---------- in_bit_array_1 : 1D ndarray of ints NumPy array of bits. in_bit_array_2 : 1D ndarray of ints NumPy array of bits. Returns ------- distance : int Hamming distance between input bit arrays. """ distance = np.bitwise_xor(in_bitarray_1, in_bitarray_2).sum() return distance def euclid_dist(in_array1, in_array2): """ Computes the squared euclidean distance between two NumPy arrays Parameters ---------- in_array1 : 1D ndarray of floats NumPy array of real values. in_array2 : 1D ndarray of floats NumPy array of real values. Returns ------- distance : float Squared Euclidean distance between two input arrays. """ distance = ((in_array1 - in_array2)*(in_array1 - in_array2)).sum() return distance def upsample(x, n): """ Upsample the input array by a factor of n Adds n-1 zeros between consecutive samples of x Parameters ---------- x : 1D ndarray Input array. n : int Upsampling factor Returns ------- y : 1D ndarray Output upsampled array. """ y = np.empty(len(x)*n, dtype=complex) y[0::n] = x zero_array = np.zeros(len(x), dtype=complex) for i in xrange(1, n): y[i::n] = zero_array return y

bsd-3-clause
8,727,163,102,457,859,000
22.055944
85
0.586897
false
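The module in the record above documents MSB-first integer/bit-array conversion and zero-insertion upsampling. A small Python 3 sketch of the same round trip in plain NumPy (this is not the CommPy code itself, which targets Python 2's xrange; the comments show the values the documented formulas give):

import numpy as np

def dec2bitarray(n, width):
    # MSB-first bit array of a non-negative integer, padded to `width` bits.
    return np.array([(n >> (width - 1 - i)) & 1 for i in range(width)], dtype=int)

def bitarray2dec(bits):
    # Inverse of the above: interpret an MSB-first 0/1 array as an integer.
    return int(sum(int(b) << (len(bits) - 1 - i) for i, b in enumerate(bits)))

bits = dec2bitarray(13, 6)
print(bits)                  # [0 0 1 1 0 1]
print(bitarray2dec(bits))    # 13

# Zero-insertion upsampling by a factor of 3, as described for `upsample`.
x = np.array([1 + 1j, 2, 3])
y = np.zeros(len(x) * 3, dtype=complex)
y[::3] = x
print(y)                     # [1.+1.j 0.+0.j 0.+0.j 2.+0.j 0.+0.j 0.+0.j 3.+0.j 0.+0.j 0.+0.j]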
luotao1/Paddle
python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py
2
2079
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest


def modified_huber_loss_forward(val):
    if val < -1:
        return -4. * val
    elif val < 1:
        return (1. - val) * (1. - val)
    else:
        return 0.


class TestModifiedHuberLossOp(OpTest):
    def setUp(self):
        self.op_type = 'modified_huber_loss'
        samples_num = 100

        x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32')
        y_np = np.random.choice([0, 1], samples_num).reshape(
            (samples_num, 1)).astype('float32')
        product_res = x_np * (2. * y_np - 1.)
        # keep away from the junction of piecewise function
        for pos, val in np.ndenumerate(product_res):
            while abs(val - 1.) < 0.05:
                x_np[pos] = np.random.uniform(-2., 2.)
                y_np[pos] = np.random.choice([0, 1])
                product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1)
                val = product_res[pos]

        self.inputs = {'X': x_np, 'Y': y_np}
        loss = np.vectorize(modified_huber_loss_forward)(product_res)

        self.outputs = {
            'IntermediateVal': product_res.astype('float32'),
            'Out': loss.reshape((samples_num, 1)).astype('float32')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == '__main__':
    unittest.main()
apache-2.0
-2,280,570,360,248,085,800
31.484375
77
0.607023
false
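The test above builds its reference output from the piecewise modified_huber_loss_forward formula. A short worked check of that formula at a few points, restated here only for illustration:

# The same piecewise formula, restated for a quick numeric check.
def modified_huber_loss_forward(val):
    if val < -1:
        return -4. * val
    elif val < 1:
        return (1. - val) * (1. - val)
    else:
        return 0.

for v in (-2.0, -1.0, 0.0, 0.5, 1.0, 2.0):
    print(v, modified_huber_loss_forward(v))
# -2.0 -> 8.0 | -1.0 -> 4.0 | 0.0 -> 1.0 | 0.5 -> 0.25 | 1.0 -> 0.0 | 2.0 -> 0.0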
simomarsili/ndd
ndd/divergence.py
1
2979
# -*- coding: utf-8 -*-
# Author: Simone Marsili
# License: BSD 3 clause
# pylint: disable=c-extension-no-member
"""Compute divergences between distributions."""
import logging
from abc import ABC, abstractmethod

import numpy
from numpy import PZERO  # pylint: disable=no-name-in-module

from ndd.estimators import EntropyEstimator, Nsb
from ndd.estimators import estimators as entropy_estimators
from ndd.estimators import fit_function
from ndd.exceptions import NddError
from ndd.utils import register_class

__all__ = ['DivergenceEstimator', 'JsDivergence']

logger = logging.getLogger(__name__)

estimators = {}


class DivergenceEstimatorType(type(EntropyEstimator)):
    """Metaclass for entropy estimators."""

    def __new__(cls, name, bases, namespace, **kwargs):
        estimator_class = type.__new__(cls, name, bases, namespace, **kwargs)
        register_class(estimator_class, estimators)
        return estimator_class


class DivergenceEstimator(EntropyEstimator, ABC,
                          metaclass=DivergenceEstimatorType):
    """Base class for estimators of divergences."""

    def __init__(self, entropy=Nsb()):
        """Default entropy estimator is NSB."""
        super(DivergenceEstimator, self).__init__()
        self.input_data_ndim = 2

        estimator_name = type(entropy).__name__
        if estimator_name not in entropy_estimators:
            raise NddError('%s is not a valid entropy estimator' %
                           estimator_name)

        self.entropy_estimator = entropy

    @property
    def algorithm(self):
        """Estimator function name."""
        return self.entropy_estimator.__class__.__name__

    @abstractmethod
    def fit(self, nk, k=None, zk=None):
        """
        Parameters
        ----------
        nk : array_like
            n-by-p array. Different rows correspond to counts from different
            distributions with the same discrete sample space.

        k : int, optional
            Number of bins. k >= p if nk is n-by-p.
            Float values are valid input for whole numbers (e.g. k=1.e3).
            Defaults to nk.shape[1].

        Returns
        -------
        self : object
            Returns the instance itself.

        Raises
        ------
        CountsError
            If nk is not a 2D array.

        """


class JsDivergence(DivergenceEstimator):
    """Jensen-Shannon divergence estimator.

    Parameters
    ----------
    entropy_estimator : EntropyEstimator object

    """

    @fit_function
    def fit(self, nk, k=None, zk=None):
        ws = numpy.float64(nk.sum(axis=1))
        ws /= ws.sum()
        if k is None:
            k = nk.shape[1]
        if k == 1:  # single bin
            return PZERO

        self.estimate_ = (self.entropy_estimator(nk.sum(axis=0), k=k) -
                          sum(ws[i] * self.entropy_estimator(x, k=k)
                              for i, x in enumerate(nk)))
        return self
bsd-3-clause
-7,343,007,803,359,725,000
27.92233
77
0.596845
false
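JsDivergence.fit in the record above computes the entropy of the mixture of rows minus the weighted average of per-row entropies, with weights proportional to the row totals. A plug-in (maximum-likelihood) version of the same arithmetic in plain NumPy; unlike ndd it applies no NSB bias correction, so it is only a sketch of the formula, not a replacement for the estimator:

import numpy as np

def plugin_entropy(counts):
    # Maximum-likelihood ("plug-in") entropy of a counts vector, in nats.
    p = np.asarray(counts, dtype=float)
    p = p[p > 0]
    p = p / p.sum()
    return float(-(p * np.log(p)).sum())

def plugin_jsd(nk):
    # H(mixture of rows) - sum_i w_i * H(row_i), with w_i proportional to row totals.
    nk = np.asarray(nk, dtype=float)
    ws = nk.sum(axis=1)
    ws = ws / ws.sum()
    return plugin_entropy(nk.sum(axis=0)) - sum(
        w * plugin_entropy(row) for w, row in zip(ws, nk))

counts = np.array([[10, 0, 0], [0, 10, 0]])
print(plugin_jsd(counts))   # ~0.693 (= ln 2) for two disjoint distributions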
QudevETH/PycQED_py3
pycqed/measurement/waveform_control/viewer.py
1
6469
# module for visualizing sequences. # # author: Wolfgang Pfaff # modified by: Adriaan Rol and Ramiro Sagastizabal import numpy as np from matplotlib import pyplot as plt from qcodes.plots.pyqtgraph import QtPlot from qcodes.plots.colors import color_cycle def show_element_dclab(element, delay=True, channels='all', ax=None): if ax is None: add_extra_labs = True fig, ax = plt.subplots(1, 1, figsize=(16, 8)) else: # prevents super long legends if plots are combined add_extra_labs = False axs2 = ax.twinx() colors_dict = {'ch1': 'red', 'ch1_marker1': 'orangered', 'ch1_marker2': 'darkred', 'ch2': 'gold', 'ch2_marker1': 'orange', 'ch2_marker2': 'yellow', 'ch3': 'green', 'ch3_marker1': 'lime', 'ch3_marker2': 'turquoise', 'ch4': 'darkblue', 'ch4_marker1': 'indigo', 'ch4_marker2': 'navy'} t_vals, outputs_dict = element.waveforms() for key in outputs_dict: if 'marker' in key: axs2.plot( t_vals[key]*1e9, outputs_dict[key], label=key, color=colors_dict[key]) else: ax.plot( t_vals[key]*1e9, outputs_dict[key], label=key, color=colors_dict[key]) ax.set_xlabel('Time (ns)') ax.set_ylabel('Analog output (V)') if add_extra_labs: # only set it once otherwise we end up with 20 labels axs2.set_ylabel('Marker output (V)') hi = element.pulsar.channels['ch1']['high'] lo = element.pulsar.channels['ch1']['low'] ax.set_ylim(lo-0.1*(hi-lo), hi+0.1*(hi-lo)) hi = element.pulsar.channels['ch1_marker1']['high'] lo = element.pulsar.channels['ch1_marker1']['low'] axs2.set_ylim(lo-0.1*(hi-lo), hi+0.1*(hi-lo)) ax.set_xlim(t_vals.min()*1e9, t_vals.max()*1e9) if add_extra_labs: ax.legend(loc='best') return ax def show_element_pyqt(element, QtPlot_win=None, color_idx=None, channels=['ch1', 'ch2', 'ch3', 'ch4']): if QtPlot_win is None: QtPlot_win = QtPlot(windowTitle='Seq_plot', figsize=(600, 400)) # FIXME: Add a legend t_vals, outputs_dict = element.waveforms() if type(channels) == str: channels = [channels] t_vals = t_vals xlabel = 'Time' xunit = 's' yunit = 'V' for i, ch in enumerate(channels): ylabel = 'Output ch {}'.format(ch) if color_idx == None: color = color_cycle[i % len(color_cycle)] else: color = color_cycle[color_idx] if i+1 > len(QtPlot_win.subplots): QtPlot_win.win.nextRow() QtPlot_win.add( x=t_vals[ch], y=outputs_dict[ch], name=ch, color=color, subplot=i+1, symbol='o', symbolSize=5, xlabel=xlabel, xunit=xunit, ylabel=ylabel, yunit=yunit) else: QtPlot_win.add( x=t_vals[ch], y=outputs_dict[ch], name=ch, color=color, subplot=i+1, symbol='o', symbolSize=5, xlabel=xlabel, xunit=xunit, ylabel=ylabel, yunit=yunit) # links all the x-axes p0 = QtPlot_win.subplots[0] for j, p in enumerate(QtPlot_win.subplots): if j > 0: p.setXLink(p0) return QtPlot_win def show_wf(tvals, wf, name='', ax=None, ret=None, dt=None): if ax is None: fig = plt.figure() ax = fig.add_subplot(111) if dt is None: dt = tvals[1]-tvals[0] ax.plot(tvals, wf, ls='-', marker='.') ax.set_xlim(tvals[0], 2*tvals[-1]-tvals[-2]) ax.set_ylabel(name + ' Amplitude') if ret == 'ax': return ax else: return None def show_element(element, delay=True, channels='all'): tvals, wfs = element.waveforms() if channels == 'all': cnt = len(wfs) else: cnt = len(channels) i = 0 fig, axs = plt.subplots(cnt, 1, sharex=True) t0 = 0 t1 = 0 for wf in wfs: if channels == 'all' or wf in channels: i += 1 hi = element.pulsar.channels[wf]['high'] lo = element.pulsar.channels[wf]['low'] # some prettifying ax = axs[i-1] ax.set_axis_bgcolor('gray') ax.axhspan(lo, hi, facecolor='w', linewidth=0) # the waveform if delay: t = tvals[wf] else: t = element.real_times(tvals[wf], wf) t0 = min(t0, 
t[0]) t1 = max(t1, t[-1]) # TODO style options show_wf(t, wfs[wf], name=wf, ax=ax, dt=1./element._clock(wf)) ax.set_ylim(lo*1.1, hi*1.1) if i == cnt: ax.set_xlabel('Time') ax.set_xlim(t0, t1) def show_fourier_of_element_channels(element, channels, unit='Hz'): ''' Shows a fourier transform of a waveform. element : from which a waveform needs to be displayed channels (str): names of the channels on which the waveform is defined in time domain. If the lenght of the channels is 2 it interprets the first as being the I quadrature and the second as the q quadrature. ''' tvals, wfs = element.waveforms() fig, ax = plt.subplots(1, 1) dt = tvals[channels[0]][1] - tvals[channels[0]][0] for i in range(1, len(channels)): if dt != tvals[channels[i]][1] - tvals[channels[i]][0]: raise ValueError('different clock signals not supported') if len(channels) == 2: compl_data = wfs[channels[0]] + 1j * wfs[channels[1]] trans_dat = np.fft.fft(compl_data)*dt n = len(compl_data) elif len(channels) == 1: trans_dat = np.fft.fft(wfs[channels[0]])*dt n = len(wfs[channels[0]]) else: trans_dat = np.fft.fft(wfs[channels])*dt n = len(wfs[channels]) freqs = np.fft.fftfreq(n, d=dt) if unit == 'MHz': freqs *= 1e-6 elif unit == 'GHz': freqs *= 1e-9 elif unit == 'Hz': pass else: raise Exception('units "{}" not recognized, valid options' ' are GHz, MHz and Hz'.format(unit)) ax.plot(freqs, trans_dat, ls='-o', marker='.') ax.set_xlabel('Frequency ({})'.format(unit))
mit
1,288,893,208,470,041,600
31.18408
86
0.531767
false
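show_fourier_of_element_channels in the record above combines the I and Q channels into one complex trace and scales np.fft.fft by the sample spacing before plotting. A toy standalone example of that step with an assumed 100 MHz tone sampled at 1 GS/s (no pycqed objects involved):

import numpy as np

# Assumed toy waveform: a 100 MHz complex (I + 1j*Q) tone sampled at 1 GS/s.
dt = 1e-9
t = np.arange(1000) * dt
iq = np.exp(2j * np.pi * 100e6 * t)

spectrum = np.fft.fft(iq) * dt             # same dt scaling as in the module above
freqs = np.fft.fftfreq(len(iq), d=dt)
print(freqs[np.argmax(np.abs(spectrum))] / 1e6, 'MHz')   # 100.0 MHz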
simphony/simphony-common
simphony/io/data_container_table.py
1
5767
from collections import MutableMapping import uuid import numpy import tables from .data_container_description import Record from .data_conversion import (convert_from_file_type, convert_to_file_type) from ..core import CUBA from ..core import DataContainer class DataContainerTable(MutableMapping): """ A proxy class to an HDF5 group node with serialised DataContainers. The class implements the Mutable-Mapping api where each DataContainer instance is mapped to uuid. """ @property def valid(self): """ A PyTables table is opened/created and the object is valid. """ return self._table is not None def __init__(self, root, name='data_containers', record=None): """ Create a proxy object for an HDF5 backed data container table. Parameters ---------- root : tables.Group The root node where to add the data container table structures. name : string The name of the new group that will be created. Default name is 'data_containers' record : table.IsDescription The table columns description to use. Default is to use the main data_container record if a new table needs to be created or the already existing record if a table already exists in file. """ handle = root._v_file self._parent = parent = root if hasattr(parent, name): self._table = getattr(parent, name) else: if record is None: record = Record self._table = handle.create_table(parent, name, record) # Prepare useful mappings columns = self._table.cols.data._v_desc._v_colobjects members = CUBA.__members__ self._cuba_to_position = { cuba: columns[member.lower()]._v_pos for member, cuba in members.items() if member.lower() in columns} self._position_to_cuba = { columns[member.lower()]._v_pos: cuba for member, cuba in members.items() if member.lower() in columns} def append(self, data): """ Append the data to the end of the table. Parameters ---------- data : DataContainer The DataContainer instance to save. Returns ------- uid : uuid.UUID The index of the saved row. """ table = self._table uid = uuid.uuid4() row = table.row row['index'] = uid.hex self._populate(row, data) row.append() table.flush() return uid def __getitem__(self, uid): """ Return the DataContainer in row. """ for row in self._table.where( 'index == value', condvars={'value': uid.hex}): return self._retrieve(row) else: raise KeyError( 'Record (id={id}) does not exist'.format(id=uid)) def __setitem__(self, uid, data): """ Set the data in row from the DataContainer. """ table = self._table for row in table.where( 'index == value', condvars={'value': uid.hex}): self._populate(row, data) row.update() # see https://github.com/PyTables/PyTables/issues/11 row._flush_mod_rows() return else: row = table.row row['index'] = uid.hex self._populate(row, data) row.append() table.flush() def __delitem__(self, uid): """ Delete the row. """ table = self._table for row in table.where( 'index == value', condvars={'value': uid.hex}): if table.nrows == 1: name = table._v_name record = table.description # pytables due to hdf5 limitations does # not support removing the last row of table # so we delete the table and # create new empty table in this situation table.remove() parent = self._parent self._table = tables.Table(parent, name, record) else: table.remove_row(row.nrow) break else: raise KeyError( 'Record (id={id}) does not exist'.format(id=uid)) def __len__(self): """ The number of rows in the table. """ return self._table.nrows def itersequence(self, sequence): """ Iterate over a sequence of row ids. 
""" for uid in sequence: yield self.__getitem__(uid) def __iter__(self): """ Iterate over all the rows """ for row in self._table: yield self._retrieve(row) def _populate(self, row, value): """ Populate the row from the DataContainer. """ positions = self._cuba_to_position mask = numpy.zeros( shape=self._table.coldtypes['mask'].shape, dtype=numpy.bool) data = list(row['data']) for key in value: if key in positions: data[positions[key]] = convert_to_file_type(value[key], key) mask[positions[key]] = True row['mask'] = mask row['data'] = tuple(data) def _retrieve(self, row): """ Return the DataContainer from a table row instance. """ cuba = self._position_to_cuba mask = row['mask'] data = row['data'] return DataContainer({ cuba[index]: convert_from_file_type(data[index], cuba[index]) for index, valid in enumerate(mask) if valid})
bsd-2-clause
-2,107,181,433,144,807,200
29.036458
76
0.537715
false
jor-/scipy
scipy/ndimage/_ni_support.py
2
3287
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
#    products derived from this software without specific prior
#    written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import division, print_function, absolute_import

import numpy
from scipy._lib.six import string_types


def _extend_mode_to_code(mode):
    """Convert an extension mode to the corresponding integer code.
    """
    if mode == 'nearest':
        return 0
    elif mode == 'wrap':
        return 1
    elif mode == 'reflect':
        return 2
    elif mode == 'mirror':
        return 3
    elif mode == 'constant':
        return 4
    else:
        raise RuntimeError('boundary mode not supported')


def _normalize_sequence(input, rank):
    """If input is a scalar, create a sequence of length equal to the
    rank by duplicating the input. If input is a sequence,
    check if its length is equal to the length of array.
    """
    is_str = isinstance(input, string_types)
    if hasattr(input, '__iter__') and not is_str:
        normalized = list(input)
        if len(normalized) != rank:
            err = "sequence argument must have length equal to input rank"
            raise RuntimeError(err)
    else:
        normalized = [input] * rank
    return normalized


def _get_output(output, input, shape=None):
    if shape is None:
        shape = input.shape
    if output is None:
        output = numpy.zeros(shape, dtype=input.dtype.name)
    elif isinstance(output, (type, numpy.dtype)):
        # Classes (like `np.float32`) and dtypes are interpreted as dtype
        output = numpy.zeros(shape, dtype=output)
    elif isinstance(output, string_types):
        output = numpy.typeDict[output]
        output = numpy.zeros(shape, dtype=output)
    elif output.shape != shape:
        raise RuntimeError("output shape not correct")
    return output


def _check_axis(axis, rank):
    if axis < 0:
        axis += rank
    if axis < 0 or axis >= rank:
        raise ValueError('invalid axis')
    return axis
bsd-3-clause
5,395,190,904,796,023,000
34.728261
74
0.691816
false
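_normalize_sequence in the record above accepts either a scalar or a per-axis sequence and always returns a list of length rank. A small self-contained illustration of that contract (simplified: it checks str directly instead of string_types, and is not the scipy helper itself):

def normalize_sequence(value, rank):
    # Scalar -> repeated list; sequence -> validated list of length `rank`.
    if hasattr(value, '__iter__') and not isinstance(value, str):
        value = list(value)
        if len(value) != rank:
            raise RuntimeError(
                "sequence argument must have length equal to input rank")
        return value
    return [value] * rank

print(normalize_sequence(3.0, 2))          # [3.0, 3.0]
print(normalize_sequence((2.0, 4.0), 2))   # [2.0, 4.0]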
vortex-ape/scikit-learn
examples/gaussian_process/plot_gpr_co2.py
2
6516
""" ======================================================== Gaussian process regression (GPR) on Mauna Loa CO2 data. ======================================================== This example is based on Section 5.4.3 of "Gaussian Processes for Machine Learning" [RW2006]. It illustrates an example of complex kernel engineering and hyperparameter optimization using gradient ascent on the log-marginal-likelihood. The data consists of the monthly average atmospheric CO2 concentrations (in parts per million by volume (ppmv)) collected at the Mauna Loa Observatory in Hawaii, between 1958 and 2001. The objective is to model the CO2 concentration as a function of the time t. The kernel is composed of several terms that are responsible for explaining different properties of the signal: - a long term, smooth rising trend is to be explained by an RBF kernel. The RBF kernel with a large length-scale enforces this component to be smooth; it is not enforced that the trend is rising which leaves this choice to the GP. The specific length-scale and the amplitude are free hyperparameters. - a seasonal component, which is to be explained by the periodic ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale of this periodic component, controlling its smoothness, is a free parameter. In order to allow decaying away from exact periodicity, the product with an RBF kernel is taken. The length-scale of this RBF component controls the decay time and is a further free parameter. - smaller, medium term irregularities are to be explained by a RationalQuadratic kernel component, whose length-scale and alpha parameter, which determines the diffuseness of the length-scales, are to be determined. According to [RW2006], these irregularities can better be explained by a RationalQuadratic than an RBF kernel component, probably because it can accommodate several length-scales. - a "noise" term, consisting of an RBF kernel contribution, which shall explain the correlated noise components such as local weather phenomena, and a WhiteKernel contribution for the white noise. The relative amplitudes and the RBF's length scale are further free parameters. Maximizing the log-marginal-likelihood after subtracting the target's mean yields the following kernel with an LML of -83.214:: 34.4**2 * RBF(length_scale=41.8) + 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44, periodicity=1) + 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957) + 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336) Thus, most of the target signal (34.4ppm) is explained by a long-term rising trend (length-scale 41.8 years). The periodic component has an amplitude of 3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay time indicates that we have a locally very close to periodic seasonal component. The correlated noise has an amplitude of 0.197ppm with a length scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the overall noise level is very small, indicating that the data can be very well explained by the model. The figure shows also that the model makes very confident predictions until around 2015. 
""" # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause from __future__ import division, print_function import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import fetch_openml from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels \ import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared try: from urllib.request import urlopen except ImportError: # Python 2 from urllib2 import urlopen print(__doc__) def load_mauna_loa_atmospheric_co2(): ml_data = fetch_openml(data_id=41187) months = [] ppmv_sums = [] counts = [] y = ml_data.data[:, 0] m = ml_data.data[:, 1] month_float = y + (m - 1) / 12 ppmvs = ml_data.target for month, ppmv in zip(month_float, ppmvs): if not months or month != months[-1]: months.append(month) ppmv_sums.append(ppmv) counts.append(1) else: # aggregate monthly sum to produce average ppmv_sums[-1] += ppmv counts[-1] += 1 months = np.asarray(months).reshape(-1, 1) avg_ppmvs = np.asarray(ppmv_sums) / counts return months, avg_ppmvs X, y = load_mauna_loa_atmospheric_co2() # Kernel with parameters given in GPML book k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend k2 = 2.4**2 * RBF(length_scale=90.0) \ * ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component # medium term irregularity k3 = 0.66**2 \ * RationalQuadratic(length_scale=1.2, alpha=0.78) k4 = 0.18**2 * RBF(length_scale=0.134) \ + WhiteKernel(noise_level=0.19**2) # noise terms kernel_gpml = k1 + k2 + k3 + k4 gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0, optimizer=None, normalize_y=True) gp.fit(X, y) print("GPML kernel: %s" % gp.kernel_) print("Log-marginal-likelihood: %.3f" % gp.log_marginal_likelihood(gp.kernel_.theta)) # Kernel with optimized parameters k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend k2 = 2.0**2 * RBF(length_scale=100.0) \ * ExpSineSquared(length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed") # seasonal component # medium term irregularities k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0) k4 = 0.1**2 * RBF(length_scale=0.1) \ + WhiteKernel(noise_level=0.1**2, noise_level_bounds=(1e-3, np.inf)) # noise terms kernel = k1 + k2 + k3 + k4 gp = GaussianProcessRegressor(kernel=kernel, alpha=0, normalize_y=True) gp.fit(X, y) print("\nLearned kernel: %s" % gp.kernel_) print("Log-marginal-likelihood: %.3f" % gp.log_marginal_likelihood(gp.kernel_.theta)) X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis] y_pred, y_std = gp.predict(X_, return_std=True) # Illustration plt.scatter(X, y, c='k') plt.plot(X_, y_pred) plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std, alpha=0.5, color='k') plt.xlim(X_.min(), X_.max()) plt.xlabel("Year") plt.ylabel(r"CO$_2$ in ppm") plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa") plt.tight_layout() plt.show()
bsd-3-clause
-2,157,258,615,484,727,000
39.222222
79
0.692756
false
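The example's kernel is a sum of four components: a long-term RBF trend, an RBF times ExpSineSquared seasonal term, a RationalQuadratic term for medium-term irregularities, and an RBF plus WhiteKernel noise term. A compact sketch of composing and evaluating the same structure on synthetic data; the data, length scales, and amplitudes here are made up, only the kernel algebra mirrors the example:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
    RBF, WhiteKernel, RationalQuadratic, ExpSineSquared)

# Same four-part kernel structure as the example, with assumed hyperparameters.
kernel = (50.0**2 * RBF(length_scale=50.0)                           # long-term trend
          + 2.0**2 * RBF(length_scale=100.0)
            * ExpSineSquared(length_scale=1.0, periodicity=1.0)      # decaying seasonal term
          + 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)  # medium-term wiggles
          + 0.1**2 * RBF(length_scale=0.1)
          + WhiteKernel(noise_level=0.1**2))                         # correlated + white noise

rng = np.random.RandomState(0)
X = np.linspace(0.0, 10.0, 60)[:, np.newaxis]
y = 0.5 * X.ravel() + np.sin(2 * np.pi * X.ravel()) + rng.normal(0.0, 0.1, 60)

# optimizer=None keeps the hyperparameters fixed, as in the GPML comparison above.
gp = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None,
                              normalize_y=True).fit(X, y)
print(gp.log_marginal_likelihood(gp.kernel_.theta))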
GiggleLiu/dmrg
dmrg.py
1
37043
''' DMRG Engine. ''' from numpy import * from scipy.sparse.linalg import eigsh from scipy.linalg import eigh,svd,eigvalsh from numpy.linalg import norm from numpy import kron as dkron from matplotlib.pyplot import * import scipy.sparse as sps import copy,time,pdb,warnings,numbers from blockmatrix.blocklib import eigbsh,eigbh,get_blockmarker,svdb from tba.hgen import SpinSpaceConfig,ind2c,Z4scfg from rglib.mps import MPS,OpString,tensor,insert_Zs from rglib.hexpand import NullEvolutor,MaskedEvolutor from tba.hgen import kron_csr as kron from blockmatrix import SimpleBMG,sign4bm,show_bm,trunc_bm from disc_symm import SymmetryHandler from superblock import SuperBlock,site_image,joint_extract_block from pydavidson import JDh from flib.flib import fget_subblock_dmrg __all__=['site_image','SuperBlock','DMRGEngine','fix_tail'] ZERO_REF=1e-12 def _eliminate_zeros(A,zero_ref): '''eliminate zeros from a sparse matrix.''' if not isinstance(A,sps.csr_matrix): A=A.tocsr() A.data[abs(A.data)<zero_ref]=0; A.eliminate_zeros() return A def _gen_hamiltonian_full(HL0,HR0,hgen_l,hgen_r,interop): '''Get the full hamiltonian.''' ndiml,ndimr=HL0.shape[0],HR0.shape[0] H1,H2=kron(HL0,sps.identity(ndimr)),kron(sps.identity(ndiml),HR0) H=H1+H2 #get the link hamiltonians sb=SuperBlock(hgen_l,hgen_r) Hin=[] for op in interop: Hin.append(sb.get_op(op)) H=H+sum(Hin) H=_eliminate_zeros(H,ZERO_REF) return H def _gen_hamiltonian_block0(HL0,HR0,hgen_l,hgen_r,interop,blockinfo): '''Get the combined hamiltonian for specific block.''' ndiml,ndimr=HL0.shape[0],HR0.shape[0] bml,bmr,pml,pmr,bmg,target_block=blockinfo['bml'],blockinfo['bmr'],blockinfo['pml'],blockinfo['pmr'],blockinfo['bmg'],blockinfo['target_block'] bm_tot,pm=bmg.join_bms([bml,bmr]).compact_form() pm=((pml*len(pmr))[:,newaxis]+pmr).ravel()[pm] t0=time.time() H1,H2=kron(HL0,sps.identity(ndimr)),kron(sps.identity(ndiml),HR0) t1=time.time() indices=pm[bm_tot.get_slice(target_block,uselabel=True)] H1,H2=H1.tocsr()[indices][:,indices],H2.tocsr()[indices][:,indices] Hc=H1+H2 sb=SuperBlock(hgen_l,hgen_r) for op in interop: Hc=Hc+(sb.get_op(op)).tocsr()[indices][:,indices] t2=time.time() print 'Generate Hamiltonian %s, %s'%(t1-t0,t2-t1) return Hc,bm_tot,pm def _gen_hamiltonian_block(HL0,HR0,hgen_l,hgen_r,interop,blockinfo): '''Get the combined hamiltonian for specific block.''' ndiml,ndimr=HL0.shape[0],HR0.shape[0] bm_tot,pm=blockinfo['bmg'].join_bms([blockinfo['bml'],blockinfo['bmr']]).compact_form() pm=((blockinfo['pml']*ndimr)[:,newaxis]+blockinfo['pmr']).ravel()[pm] indices=pm[bm_tot.get_slice(blockinfo['target_block'],uselabel=True)] cinds=ind2c(indices,N=[ndiml,ndimr]) t0=time.time() H1=fget_subblock_dmrg(hl=HL0.toarray(),hr=identity(ndimr),indices=cinds,is_identity=2) H2=fget_subblock_dmrg(hl=identity(ndiml),hr=HR0.toarray(),indices=cinds,is_identity=1) Hc=H1+H2 t1=time.time() sb=SuperBlock(hgen_l,hgen_r) for op in interop: Hc=Hc+sb.get_op(op,indices=cinds) t2=time.time() print 'Generate Hamiltonian %s, %s'%(t1-t0,t2-t1) return sps.csr_matrix(Hc),bm_tot,pm def _get_mps(hgen_l,hgen_r,phi,direction,labels): '''Combining hgen_l and hgen_r to get the matrix product state.''' NL,NR=hgen_l.N,hgen_r.N phi=tensor.Tensor(phi,labels=['al','sl+1','al+2','sl+2']) #l=NL-1 if direction=='->': A=hgen_l.evolutor.A(NL-1,dense=True) #get A[sNL](NL-1,NL) A=tensor.Tensor(A,labels=['sl+1','al','al+1\'']) phi=tensor.contract([A,phi]) phi=phi.chorder([0,2,1]) #now we get phi(al+1,sl+2,al+2) #decouple phi into S*B, B is column-wise othorgonal 
U,S,V=svd(phi.reshape([phi.shape[0],-1]),full_matrices=False) U=tensor.Tensor(U,labels=['al+1\'','al+1']) A=(A*U) #get A(al,sl+1,al+1) B=transpose(V.reshape([S.shape[0],phi.shape[1],phi.shape[2]]),axes=(1,2,0)) #al+1,sl+2,al+2 -> sl+2,al+2,al+1, stored in column wise othorgonal format else: B=hgen_r.evolutor.A(NR-1,dense=True) #get B[sNR](NL+1,NL+2) B=tensor.Tensor(B,labels=['sl+2','al+2','al+1\'']).conj() #!the conjugate? phi=tensor.contract([phi,B]) #decouple phi into A*S, A is row-wise othorgonal U,S,V=svd(phi.reshape([phi.shape[0]*phi.shape[1],-1]),full_matrices=False) V=tensor.Tensor(V,labels=['al+1','al+1\'']) B=(V*B).chorder([1,2,0]).conj() #al+1,sl+2,al+2 -> sl+2,al+2,al+1, for B is in transposed order by default. A=transpose(U.reshape([phi.shape[0],phi.shape[1],S.shape[0]]),axes=(1,0,2)) #al,sl+1,al+1 -> sl+1,al,al+1, stored in column wise othorgonal format AL=hgen_l.evolutor.get_AL(dense=True)[:-1]+[A] BL=[B]+hgen_r.evolutor.get_AL(dense=True)[::-1][1:] AL=[transpose(ai,axes=(1,0,2)) for ai in AL] BL=[transpose(bi,axes=(1,0,2)).conj() for bi in BL] #transpose mps=MPS(AL=AL,BL=BL,S=S,labels=labels,forder=range(NL)+range(NL,NL+NR)[::-1]) return mps class DMRGEngine(object): ''' DMRG Engine. Attributes: :hgen: <ExpandGenerator>, hamiltonian Generator. :bmg: <BlockMarkerGenerator>, the block marker generator. :tol: float, the tolerence, when maxN and tol are both set, we keep the lower dimension. :reflect: bool, True if left<->right reflect, can be used to shortcut the run time. :eigen_solver: str, * 'JD', Jacobi-Davidson iteration. * 'LC', Lanczos, algorithm. :iprint: int, the redundency level of output information, 0 for None, 10 for debug. :symm_handler: <SymmetryHandler>, the discrete symmetry handler. :LPART/RPART: dict, the left/right sweep of hamiltonian generators. :_tails(private): list, the last item of A matrices, which is used to construct the <MPS>. ''' def __init__(self,hgen,tol=0,reflect=False,eigen_solver='LC',iprint=1): self.tol=tol self.hgen=hgen self.eigen_solver=eigen_solver #the symmetries self.reflect=reflect self.bmg=None self._target_block=None self.symm_handler=SymmetryHandler({},detect_scope=1) #claim attributes with dummy values. self._tails=None self.LPART=None self.RPART=None self.iprint=iprint #status self.status={'isweep':0,'direction':'->','pos':0} def _eigsh(self,H,v0,projector=None,tol=1e-10,sigma=None,lc_search_space=1,k=1): ''' solve eigenvalue problem. ''' maxiter=5000 N=H.shape[0] if self.iprint==10 and projector is not None and check_commute: assert(is_commute(H,projector)) if self.eigen_solver=='LC': k=max(lc_search_space,k) if H.shape[0]<100: e,v=eigh(H.toarray()) e,v=e[:k],v[:,:k] else: try: e,v=eigsh(H,k=k,which='SA',maxiter=maxiter,tol=tol,v0=v0) except: e,v=eigsh(H,k=k+1,which='SA',maxiter=maxiter,tol=tol,v0=v0) order=argsort(e) e,v=e[order],v[:,order] else: iprint=0 maxiter=500 if projector is not None: e,v=JDh(H,v0=v0,k=k,projector=projector,tol=tol,maxiter=maxiter,sigma=sigma,which='SA',iprint=iprint) else: if sigma is None: e,v=JDh(H,v0=v0,k=max(lc_search_space,k),projector=projector,tol=tol,maxiter=maxiter,which='SA',iprint=iprint) else: e,v=JDh(H,v0=v0,k=k,projector=projector,tol=tol,sigma=sigma,which='SL',\ iprint=iprint,converge_bound=1e-10,maxiter=maxiter) nstate=len(e) if nstate==0: raise Exception('No Converged Pair!!') elif nstate==k or k>1: return e,v #filter out states meeting projector. 
if projector is not None and lc_search_space!=1: overlaps=array([abs(projector.dot(v[:,i]).conj().dot(v[:,i])) for i in xrange(nstate)]) mask0=overlaps>0.1 if not any(mask0): raise Exception('Can not find any states meeting specific parity!') mask=overlaps>0.9 if sum(mask)==0: #check for degeneracy. istate=where(mask0)[0][0] warnings.warn('Wrong result or degeneracy accur!') else: istate=where(mask)[0][0] v=projector.dot(v[:,istate:istate+1]) v=v/norm(v) return e[istate:istate+1],v else: #get the state with maximum overlap. v0H=v0.conj()/norm(v0) overlaps=array([abs(v0H.dot(v[:,i])) for i in xrange(nstate)]) istate=argmax(overlaps) if overlaps[istate]<0.7: warnings.warn('Do not find any states same correspond to the one from last iteration!%s'%overlaps) e,v=e[istate:istate+1],v[:,istate:istate+1] return e,v @property def nsite(self): '''Number of sites''' return self.hgen.nsite def query(self,which,length): ''' Query the hamiltonian generator of specific part. which: `l` -> the left part. `r` -> the right part. length: The length of block. ''' assert(which=='l' or which=='r') if which=='l' or self.reflect: return copy.copy(self.LPART[length]) else: return copy.copy(self.RPART[length]) def set(self,which,hgen,length=None): ''' Set the hamiltonian generator for specific part. Parameters: :which: str, * `l` -> the left part. * `r` -> the right part. :hgen: <ExpandGenerator>, the RG hamiltonian generator. :length: int, the length of block, if set, it will do a length check. ''' assert(length is None or length==hgen.N) assert(hgen.truncated) if which=='l' or self.reflect: self.LPART[hgen.N]=hgen else: self.RPART[hgen.N]=hgen def reset(self): '''Restore this engine to initial status.''' #we insert Zs into operator collections to cope with fermionic sign problem. #and use site image to create a reversed ordering! hgen_l=copy.deepcopy(self.hgen) if not isinstance(hgen_l.spaceconfig,SpinSpaceConfig): insert_Zs(hgen_l.evolutees['H'].opc,spaceconfig=hgen_l.spaceconfig) self.LPART={0:hgen_l} if not self.reflect: hgen_r=copy.deepcopy(self.hgen) hgen_r.evolutees['H'].opc=site_image(hgen_r.evolutees['H'].opc,NL=0,NR=hgen_r.nsite,care_sign=True) if not isinstance(hgen_l.spaceconfig,SpinSpaceConfig): insert_Zs(hgen_r.evolutees['H'].opc,spaceconfig=hgen_r.spaceconfig) self.RPART={0:hgen_r} def use_disc_symmetry(self,target_sector,detect_scope=2): ''' Use specific discrete symmetry. Parameters: :target_sector: dict, {name:parity} pairs. :detect_scope: ''' if target_sector.has_key('C') and not self.reflect: raise Exception('Using C2 symmetry without reflection symmetry is unreliable, forbiden for safety!') symm_handler=SymmetryHandler(target_sector,detect_scope=detect_scope) if target_sector.has_key('P'): #register flip evolutee. handler=symm_handler.handlers['P'] self.hgen.register_evolutee('P',opc=prod([handler.P(i) for i in xrange(self.hgen.nsite)]),initial_data=sps.identity(1)) if target_sector.has_key('J'): #register p-h evolutee. handler=symm_handler.handlers['J'] self.hgen.register_evolutee('J',opc=prod([handler.J(i) for i in xrange(self.hgen.nsite)]),initial_data=sps.identity(1)) self.symm_handler=symm_handler def use_U1_symmetry(self,qnumber,target_block): ''' Use specific U1 symmetry. 
''' self.bmg=SimpleBMG(spaceconfig=self.hgen.spaceconfig,qstring=qnumber) self._target_block=target_block @property def target_block(self): '''Get the target block.''' target_block=self._target_block if hasattr(target_block,'__call__'): n,pos=self.status['isweep'],self.status['pos'] nsite=self.nsite if n==0 and pos<nsite/2: nsite=pos*2 target_block=target_block(nsite=nsite) return target_block def run_finite(self,endpoint=None,tol=0,maxN=20,nlevel=1,call_before=None,call_after=None): ''' Run the application. Parameters: :endpoint: tuple, the end position tuple of (sweep, direction, size of left-block). :tol: float, the rolerence of energy. :maxN: int, maximum number of kept states and the tolerence for truncation weight. :nlevel: int, the number of desired energy levels. :call_before/call_after: function/None, the function to call back before/after each iteration, using `DMRGEngine` as an parameter. Return: tuple, the ground state energy and the ground state(in <MPS> form). ''' EL=[] #check the validity of datas. if isinstance(self.hgen.evolutor,NullEvolutor): raise ValueError('The evolutor must not be null!') if not self.symm_handler==None and nlevel!=1: raise NotImplementedError('The symmetric Handler can not be used in multi-level calculation!') if not self.symm_handler==None and self.bmg is None: raise NotImplementedError('The symmetric Handler can not without Block marker generator!') self.reset() nsite=self.hgen.nsite if endpoint is None: endpoint=(4,'<-',0) maxsweep,end_direction,end_site=endpoint if ndim(maxN)==0: maxN=[maxN]*maxsweep assert(len(maxN)>=maxsweep and end_site<=(nsite-2 if not self.reflect else nsite/2-2)) EG_PRE=Inf initial_state=None if self.reflect: iterators={'->':xrange(nsite/2),'<-':xrange(nsite/2-2,-1,-1)} else: iterators={'->':xrange(nsite-1),'<-':xrange(nsite-2,-1,-1)} for n,m in enumerate(maxN): for direction in ['->','<-']: for i in iterators[direction]: print 'Running %s-th sweep, iteration %s'%(n+1,i) t0=time.time() self.status.update({'isweep':n,'pos':i+1,'direction':direction}) if call_before is not None: call_before(self) #setup generators and operators. #The cases to use identical hamiltonian generator, #1. the first half of first sweep. #2. the reflection is used and left block is same length with right block. hgen_l=self.query('l',i) if (n==0 and direction=='->' and i<(nsite+1)/2) or (self.reflect and i==(nsite/2-1) and nsite%2==0): hgen_r=hgen_l else: hgen_r=self.query('r',nsite-i-2) print 'A'*hgen_l.N+'..'+'B'*hgen_r.N nsite_true=hgen_l.N+hgen_r.N+2 #run a step if n<=2: e_estimate=None else: e_estimate=EG[0] EG,err,phil=self.dmrg_step(hgen_l,hgen_r,tol=tol,maxN=m, initial_state=initial_state,e_estimate=e_estimate,nlevel=nlevel) #update LPART and RPART print 'setting %s-site of left and %s-site of right.'%(hgen_l.N,hgen_r.N) self.set('l',hgen_l,hgen_l.N) print 'set L = %s, size %s'%(hgen_l.N,hgen_l.ndim) if hgen_l is not hgen_r or (not self.reflect and n==0 and i<nsite/2): #Note: Condition for setting up the right block, #1. when the left and right part are not the same one. #2. when the block has not been expanded to full length and not reflecting. self.set('r',hgen_r,hgen_r.N) print 'set R = %s, size %s'%(hgen_r.N,hgen_r.ndim) if call_after is not None: call_after(self) #do state prediction initial_state=None #restore initial state. 
phi=phil[0] if nsite==nsite_true: if self.reflect and nsite%2==0 and (i==nsite/2-2 and direction=='->'): #Prediction can not be used: #when we are going to calculate the symmetry point #and use the reflection symmetry. #for the right block is instantly replaced by another hamiltonian generator, #which is not directly connected to the current hamiltonian generator. initial_state=sum([self.state_prediction(phi,l=i+1,direction=direction) for phi in phil],axis=0).ravel() elif direction=='->' and i==nsite-2: #for the case without reflection. initial_state=phil[0].ravel() elif direction=='<-' and i==0: initial_state=phil[0].ravel() else: if self.reflect and direction=='->' and i==nsite/2-1: direction='<-' #the turning point of where reflection used. initial_state=sum([self.state_prediction(phi,l=i+1,direction=direction) for phi in phil],axis=0) initial_state=initial_state.ravel() if len(EL)>0: diff=EG-EL[-1] else: diff=Inf t1=time.time() print 'EG = %s, dE = %s, Elapse -> %.2f, TruncError -> %s'%(EG,diff,t1-t0,err) EL.append(EG) if i==end_site and direction==end_direction: diff=EG-EG_PRE print 'MidPoint -> EG = %s, dE = %s'%(EG,diff) if n==maxsweep-1: print 'Breaking due to maximum sweep reached!' return EG,self.get_mps(phi=phil[0],l=i+1,direction=direction) else: EG_PRE=EG def run_infinite(self,maxiter=50,tol=0,maxN=20,nlevel=1): ''' Run the application. Parameters: :maxiter: int, the maximum iteration times. :tol: float, the rolerence of energy. :maxN: int/list, maximum number of kept states and the tolerence for truncation weight. Return: tuple of EG,MPS. ''' if isinstance(self.hgen.evolutor,NullEvolutor): raise ValueError('The evolutor must not be null!') if not self.symm_handler==None and nlevel!=1: raise NotImplementedError('The symmetric Handler can not be used in multi-level calculation!') if not self.symm_handler==None and self.bmg is None: raise NotImplementedError('The symmetric Handler can not without Block marker generator!') self.reset() EL=[] hgen=copy.deepcopy(self.hgen) if isinstance(hgen.evolutor,NullEvolutor): raise ValueError('The evolutor must not be null!') if maxiter>self.hgen.nsite: warnings.warn('Max iteration exceeded the chain length!') for i in xrange(maxiter): print 'Running iteration %s'%i t0=time.time() EG,err,phil=self.dmrg_step(hgen,hgen,tol=tol,nlevel=nlevel) EG=EG/(2.*(i+1)) if len(EL)>0: diff=EG-EL[-1] else: diff=Inf t1=time.time() print 'EG = %.5f, dE = %.2e, Elapse -> %.2f(D=%s), TruncError -> %.2e'%(EG,diff,t1-t0,hgen.ndim,err) EL.append(EG) if abs(diff)<tol: print 'Breaking!' break return EG,_get_mps(hgen,hgen,phi=phil[0],direction='->',labels=['s','a']) def dmrg_step(self,hgen_l,hgen_r,tol=0,maxN=20,e_estimate=None,nlevel=1,initial_state=None): ''' Run a single step of DMRG iteration. Parameters: :hgen_l,hgen_r: <ExpandGenerator>, the hamiltonian generator for left and right blocks. :tol: float, the rolerence. :maxN: int, maximum number of kept states and the tolerence for truncation weight. :initial_state: 1D array/None, the initial state(prediction), None for random. Return: tuple of (ground state energy(float), unitary matrix(2D array), kpmask(1D array of bool), truncation error(float)) ''' direction=self.status['direction'] target_block=self.target_block t0=time.time() intraop_l,intraop_r,interop=[],[],[] hndim=hgen_l.hndim ndiml0,ndimr0=hgen_l.ndim,hgen_r.ndim NL,NR=hgen_l.N,hgen_r.N #filter operators to extract left-only and right-only blocks. 
interop=filter(lambda op:isinstance(op,OpString) and (NL+1 in op.siteindex),hgen_l.hchain.query(NL)) #site NL and NL+1 OPL=hgen_l.expand1() HL0=OPL['H'] #expansion can not do twice to the same hamiltonian generator! if hgen_r is hgen_l: OPR,HR0=OPL,HL0 else: OPR=hgen_r.expand1() HR0=OPR['H'] #blockize HL0 and HR0 NL,NR=hgen_l.N,hgen_r.N if self.bmg is not None: n=max(NL,NR) if isinstance(hgen_l.evolutor,MaskedEvolutor) and n>1: kpmask_l=hgen_l.evolutor.kpmask(NL-2) #kpmask is also related to block marker!!! kpmask_r=hgen_r.evolutor.kpmask(NR-2) bml,pml=self.bmg.update1(trunc_bm(hgen_l.block_marker or self.bmg.bm0,kpmask_l)).compact_form() bmr,pmr=self.bmg.update1(trunc_bm(hgen_r.block_marker or self.bmg.bm0,kpmask_r)).compact_form() else: bml,pml=self.bmg.update1(hgen_l.block_marker).compact_form() bmr,pmr=self.bmg.update1(hgen_r.block_marker).compact_form() else: bml,pml=None,None #get_blockmarker(HL0) bmr,pmr=None,None #get_blockmarker(HR0) if target_block is None: Hc,bm_tot=_gen_hamiltonian_full(HL0,HR0,hgen_l,hgen_r,interop=interop),None else: if False: #efficiency cross over Hc,bm_tot,pm_tot=_gen_hamiltonian_block0(HL0,HR0,hgen_l=hgen_l,hgen_r=hgen_r,\ blockinfo=dict(bml=bml,bmr=bmr,pml=pml,pmr=pmr,bmg=self.bmg,target_block=target_block),interop=interop) else: Hc,bm_tot,pm_tot=_gen_hamiltonian_block(HL0,HR0,hgen_l=hgen_l,hgen_r=hgen_r,\ blockinfo=dict(bml=bml,bmr=bmr,pml=pml,pmr=pmr,bmg=self.bmg,target_block=target_block),interop=interop) #get the starting eigen state v00! if initial_state is None: initial_state=random.random(bm_tot.N) if not self.symm_handler==None: if hgen_l is not hgen_r: #Note, The cases to disable C2 symmetry, #1. NL!=NR #2. NL==NR, reflection is not used(and not the first iteration). self.symm_handler.update_handlers(OPL=OPL,OPR=OPR,useC=False) else: nl=(int32(1-sign4bm(bml,self.bmg,diag_only=True))/2)[argsort(pml)] self.symm_handler.update_handlers(OPL=OPL,OPR=OPR,n=nl,useC=True) v00=self.symm_handler.project_state(phi=initial_state) if self.iprint==10:assert(self.symm_handler.check_op(H)) else: v00=initial_state #perform diagonalization ##1. detect specific block for diagonalization, get v0 and projector projector=self.symm_handler.get_projector() if len(self.symm_handler.symms)!=0 else None if self.bmg is None or target_block is None: v0=v00/norm(v00) else: indices=pm_tot[bm_tot.get_slice(target_block,uselabel=True)] v0=v00[indices] if projector is not None: projector=projector[indices] ##2. diagonalize to get desired number of levels detect_C2=self.symm_handler.target_sector.has_key('C')# and not symm_handler.useC t1=time.time() if norm(v0)==0: warnings.warn('Empty v0') v0=None print 'The density of Hamiltonian -> %s'%(1.*len(Hc.data)/Hc.shape[0]**2) e,v=self._eigsh(Hc,v0,sigma=e_estimate,projector=projector, lc_search_space=self.symm_handler.detect_scope if detect_C2 else 1,k=nlevel,tol=1e-10) if v0 is not None: print 'The goodness of estimate -> %s'%(v0.conj()/norm(v0)).dot(v[:,0]) t2=time.time() ##3. permute back eigen-vectors into original representation al,sl+1,sl+2,al+2 if bm_tot is not None: indices=pm_tot[bm_tot.get_slice(target_block,uselabel=True)] vl=zeros([bm_tot.N,v.shape[1]],dtype=v.dtype) vl[indices]=v; vl=vl.T else: vl=v.T #Do-wavefunction analysis, preliminary truncation is performed(up to ZERO_REF). 
for v in vl: v[abs(v)<ZERO_REF]=0 #spec1,U1,kpmask1,trunc_error=self.rdm_analysis(phis=vl,bml=bml,bmr=bmr,side='l',maxN=maxN) U1,specs,U2,(kpmask1,kpmask2),trunc_error=self.svd_analysis(phis=vl,bml=HL0.shape[0] if bml is None else bml,\ bmr=HR0.shape[0] if bmr is None else bmr,pml=pml,pmr=pmr,maxN=maxN) print '%s states kept.'%sum(kpmask1) hgen_l.trunc(U=U1,kpmask=kpmask1) #kpmask is also important for setting up the sign if hgen_l is not hgen_r: #spec2,U2,kpmask2,trunc_error=self.rdm_analysis(phis=vl,bml=bml,bmr=bmr,side='r',maxN=maxN) hgen_r.trunc(U=U2,kpmask=kpmask2) phil=[phi.reshape([ndiml0,hndim,ndimr0,hndim]) for phi in vl] t3=time.time() print 'Elapse -> prepair:%.2f, eigen:%.2f, trunc: %.2f'%(t1-t0,t2-t1,t3-t2) return e,trunc_error,phil def svd_analysis(self,phis,bml,bmr,pml,pmr,maxN): ''' The direct analysis of state(svd). Parameters: :phis: list of 1D array, the kept eigen states of current iteration. :bml/bmr: <BlockMarker>/int, the block marker for left and right blocks/or the dimensions. :maxN: int, the maximum kept values. Return: tuple of (spec, U), the spectrum and Unitary matrix from the density matrix. ''' if isinstance(bml,numbers.Number): use_bm=False ndiml,ndimr=bml,bmr else: ndiml,ndimr=bml.N,bmr.N use_bm=True phi=sum(phis,axis=0).reshape([ndiml,ndimr])/sqrt(len(phis)) #construct wave function of equal distribution of all states. phi[abs(phi)<ZERO_REF]=0 if use_bm: phi=phi[pml] phi=phi[:,pmr] def mapping_rule(bli): res=self.bmg.bcast_sub([self.target_block],[bli])[0] return tuple(res) U,S,V,S2=svdb(phi,bm=bml,bm2=bmr,mapping_rule=mapping_rule,full_matrices=True) else: U,S,V=svd(phi,full_matrices=True);U2=V.T.conj() if ndimr>=ndiml: S2=append(S,zeros(ndimr-ndiml)) else: S2=append(S,zeros(ndiml-ndimr)) S,S2=S2,S S,S2=sps.diags(S,0),sps.diags(S2,0) spec_l=S.dot(S.T.conj()).diagonal().real spec_r=S2.T.conj().dot(S2).diagonal().real if use_bm: if self.iprint==10 and not (bml.check_blockdiag(U.dot(sps.diags(spec_l,0)).dot(U.T.conj())) and\ bmr.check_blockdiag((V.T.conj().dot(sps.diags(spec_r,0))).dot(V))): raise Exception('''Density matrix is not block diagonal, which is not expected, 1. make sure your are using additive good quantum numbers. 2. avoid ground state degeneracy.''') #permute U and V U,V=U.tocsr()[argsort(pml)],V.tocsc()[:,argsort(pmr)] U2=V.T.conj() kpmasks=[] for Ui,spec in zip([U,U2],[spec_l,spec_r]): kpmask=zeros(Ui.shape[1],dtype='bool') spec_cut=sort(spec)[max(0,Ui.shape[0]-maxN)] kpmask[(spec>=spec_cut)&(spec>ZERO_REF)]=True trunc_error=sum(spec[~kpmask]) kpmasks.append(kpmask) U,U2=_eliminate_zeros(U,ZERO_REF),_eliminate_zeros(U2,ZERO_REF) return U,(spec_l,spec_r),U2,kpmasks,trunc_error def rdm_analysis(self,phis,bml,bmr,side,maxN): ''' The analysis of reduced density matrix. Parameters: :phis: list of 1D array, the kept eigen states of current iteration. :bml/bmr: <BlockMarker>/int, the block marker for left and right blocks/or the dimensions. :side: 'l'/'r', view the left or right side as the system. :maxN: the maximum kept values. Return: tuple of (spec, U), the spectrum and Unitary matrix from the density matrix. 
''' assert(side=='l' or side=='r') ndiml,ndimr=(bml,bmr) if isinstance(bml,numbers.Number) else (bml.N,bmr.N) phis=[phi.reshape([ndiml,ndimr]) for phi in phis] rho=0 phil=[] if side=='l': for phi in phis: phi=sps.csr_matrix(phi) rho=rho+phi.dot(phi.T.conj()) phil.append(phi) bm=bml else: for phi in phis: phi=sps.csc_matrix(phi) rho=rho+phi.T.dot(phi.conj()) phil.append(phi) bm=bmr if bm is not None: rho=bm.blockize(rho) if self.iprint==10 and not bm.check_blockdiag(rho,tol=1e-5): ion() pcolor(exp(abs(rho.toarray().real))) show_bm(bm) pdb.set_trace() raise Exception('''Density matrix is not block diagonal, which is not expected, 1. make sure your are using additive good quantum numbers. 2. avoid ground state degeneracy.''') spec,U=eigbh(rho,bm=bm) kpmask=zeros(U.shape[1],dtype='bool') spec_cut=sort(spec)[max(0,U.shape[0]-maxN)] kpmask[(spec>=spec_cut)&(spec>ZERO_REF)]=True trunc_error=sum(spec[~kpmask]) print 'With %s(%s) blocks.'%(bm.nblock,bm.nblock) return spec,U,kpmask,trunc_error def state_prediction(self,phi,l,direction): ''' Predict the state for the next iteration. Parameters: :phi: ndarray, the state from the last iteration, [llink, site1, rlink, site2] :l: int, the current division point, the size of left block. :direction: '->'/'<-', the moving direction. Return: ndarray, the new state in the basis |al+1,sl+2,sl+3,al+3>. reference -> PRL 77. 3633 ''' assert(direction=='<-' or direction=='->') nsite=self.hgen.nsite NL,NR=l,nsite-l phi=tensor.Tensor(phi,labels=['a_%s'%(NL-1),'s_%s'%(NL),'b_%s'%(NR-1),'t_%s'%NR]) #l=NL-1 if self.reflect and nsite%2==0 and l==nsite/2-1 and direction=='->': #hard prediction! return self._state_prediction_hard(phi) hgen_l,hgen_r=self.query('l',NL),self.query('r',NR) lr=NR-2 if direction=='->' else NR-1 ll=NL-1 if direction=='->' else NL-2 A=hgen_l.evolutor.A(ll,dense=True) #get A[sNL](NL-1,NL) B=hgen_r.evolutor.A(lr,dense=True) #get B[sNR](NL+1,NL+2) if direction=='->': A=tensor.Tensor(A,labels=['s_%s'%NL,'a_%s'%(NL-1),'a_%s'%NL]).conj() B=tensor.Tensor(B,labels=['t_%s'%(NR-1),'b_%s'%(NR-2),'b_%s'%(NR-1)])#.conj() #!the conjugate? right side shrink, so B(al,al+1) do not conjugate. phi=tensor.contract([A,phi,B]) phi=phi.chorder([0,1,3,2]) if hgen_r.use_zstring: #cope with the sign problem n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2 nr=(1-hgen_r.zstring(lr).diagonal())/2 n_tot=n1[:,newaxis,newaxis]*(nr[:,newaxis]+n1) phi=phi*(1-2*(n_tot%2)) else: A=tensor.Tensor(A,labels=['s_%s'%(NL-1),'a_%s'%(NL-2),'a_%s'%(NL-1)])#.conj() B=tensor.Tensor(B,labels=['t_%s'%NR,'b_%s'%(NR-1),'b_%s'%NR]).conj() #!the conjugate? 
phi=tensor.contract([A,phi,B]) phi=phi.chorder([1,0,3,2]) if hgen_r.use_zstring: #cope with the sign problem n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2 nr=(1-hgen_r.zstring(lr+1).diagonal())/2 n_tot=n1*(nr[:,newaxis]) phi=phi*(1-2*(n_tot%2)) return phi def _state_prediction_hard(self,phi): ''' The hardest prediction for reflection point for phi(al,sl+1,sl+2,al+2) -> phi(al-1,sl,sl+1,al+1') ''' nsite=self.hgen.nsite l=nsite/2 hgen_l,hgen_r0,hgen_r=self.query('l',l-1),self.query('r',l+2),self.query('r',l-1) #do regular evolution to phi(al,sl+1,sl+2,al+2) -> phi(al-1,sl,sl+1,al+1) A=hgen_l.evolutor.A(l-2,dense=True) #get A[sNL](NL-1,NL) B=hgen_r0.evolutor.A(l-1,dense=True) #get B[sNR](NL+1,NL+2) A=tensor.Tensor(A,labels=['s_%s'%(l-1),'a_%s'%(l-2),'a_%s'%(l-1)]) B=tensor.Tensor(B,labels=['t_%s'%l,'b_%s'%(l-1),'b_%s'%(l)]).conj() phi=tensor.contract([A,phi,B]) if hgen_r.use_zstring: #cope with the sign problem n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2 nr=(1-hgen_r0.zstring(l-1).diagonal())/2 n_tot=n1[:,newaxis,newaxis]*(nr+n1[:,newaxis]) phi=phi*(1-2*(n_tot%2)) #do the evolution from phi(al-1,sl,sl+1,al+1) -> phi(al-1,sl,sl+1,al+1') #first calculate tensor R(al+1',al+1), right one incre, left decre. BL0=hgen_r0.evolutor.get_AL(dense=True)[:l-1] BL=hgen_r.evolutor.get_AL(dense=True) BL0=[tensor.Tensor(bi,labels=['t_%s'%(i+1),'b_%s'%i,'b_%s'%(i+1)]) for i,bi in enumerate(BL0)] BL=[tensor.Tensor(bi,labels=['t_%s'%(i+1),'b_%s'%i+('\'' if i!=0 else ''),'b_%s\''%(i+1)]).conj() for i,bi in enumerate(BL)] R=BL[0]*BL0[0] for i in xrange(1,l-1): R=tensor.contract([R,BL0[i],BL[i]]) #second, calculate phi*R phi=phi*R phi=phi.chorder([0,1,3,2]) return phi def get_mps(self,phi,l,labels=['s','a'],direction=None): ''' Get the MPS from run-time phi, and evolution matrices. Parameters: :phi: ndarray, the eigen-function of current step. :l: int, the size of left block. :direction: '->'/'<-'/None, if None, the direction is provided by the truncation information. Return: <MPS>, the disired MPS, the canonicallity if decided by the current position. ''' #get the direction assert(direction=='<-' or direction=='->') nsite=self.hgen.nsite NL,NR=l,nsite-l hgen_l,hgen_r=self.query('l',NL),self.query('r',NR) return _get_mps(hgen_l,hgen_r,phi,direction,labels) def fix_tail(mps,spaceconfig,parity,head2tail=True): ''' Fix the ordering to normal order(reverse). Parameters: :mps: <MPS>, the matrix product state. :spaceconfig: <SpaceConfig>, :parity: int, 1 for odd parity, 0 for even parity. :head2tail: bool, move head to tail if True, else move tail to head. Return: <MPS>, the new MPS. ''' nsite=mps.nsite assert(allclose(mps.forder,[0]+range(1,nsite)[::-1])) n1=(1-Z4scfg(spaceconfig).diagonal())/2 site_axis=mps.site_axis if head2tail: j=list(mps.forder).index(0) norder=array(mps.forder)-1 norder[j]=nsite-1 else: j=list(mps.forder).index(nsite-1) norder=array(mps.forder)+1 norder[j]=0 mps.forder=norder if parity==1: return mps if j<mps.l: mps.AL[j]=mps.AL[j]*(1-2*(n1%2))[tuple([slice(None)]+[newaxis]*(2-site_axis))] else: mps.BL[j-mps.l]=mps.BL[j-mps.l]*(1-2*(n1%2))[tuple([slice(None)]+[newaxis]*(2-site_axis))] return mps
gpl-2.0
2,516,026,093,265,604,000
43.63012
160
0.568474
false
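A minimal NumPy sketch of the SVD-based truncation step that svd_analysis in the DMRG source above performs: reshape the ground state into a left-by-right matrix, keep at most maxN Schmidt states, and report the discarded weight as the truncation error. The names used here (truncate_state, psi, maxN) are illustrative and not taken from the file; this is a sketch of the technique, not the project's implementation.

import numpy as np


def truncate_state(psi, ndim_left, ndim_right, maxN=20):
    """Keep at most maxN Schmidt states of a bipartite wavefunction.

    Returns the truncated left/right bases, the kept singular values and
    the truncation error (sum of the discarded squared singular values).
    """
    phi = np.asarray(psi).reshape(ndim_left, ndim_right)
    U, S, Vh = np.linalg.svd(phi, full_matrices=False)
    keep = min(maxN, len(S))
    trunc_error = float(np.sum(S[keep:] ** 2))
    return U[:, :keep], S[:keep], Vh[:keep], trunc_error


if __name__ == "__main__":
    # illustrative random state, normalised so the discarded weight is meaningful
    rng = np.random.default_rng(0)
    psi = rng.normal(size=16 * 16)
    psi /= np.linalg.norm(psi)
    U, S, Vh, err = truncate_state(psi, 16, 16, maxN=8)
    print("kept states:", len(S), " truncation error:", err)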
dariox2/CADL
test/testyida9b.py
1
6390
# # test shuffle_batch w/2 file queues # # example with pipeline returning batch with pairs # of matching files (e.g. color + black/white), # using a seed for repeating the random sequence # # v.9b - store all pics, wait till end, then save # all together. Sequences are identical. # import os import numpy as np import matplotlib.pyplot as plt print("Loading tensorflow...") import tensorflow as tf #from libs import utils import datetime tf.set_random_seed(1) def create_input_pipeline_2(files1, files2, batch_size, shape, crop_shape=None, crop_factor=1.0, n_threads=1, seed=None): producer1 = tf.train.string_input_producer( files1, capacity=len(files1), shuffle=False) producer2 = tf.train.string_input_producer( files2, capacity=len(files2), shuffle=False) # We need something which can open the files and read its contents. reader = tf.WholeFileReader() # We pass the filenames to this object which can read the file's contents. # This will create another queue running which dequeues the previous queue. keys1, vals1 = reader.read(producer1) keys2, vals2 = reader.read(producer2) # And then have to decode its contents as we know it is a jpeg image imgs1 = tf.image.decode_jpeg(vals1, channels=3) imgs2 = tf.image.decode_jpeg(vals2, channels=3) # We have to explicitly define the shape of the tensor. # This is because the decode_jpeg operation is still a node in the graph # and doesn't yet know the shape of the image. Future operations however # need explicit knowledge of the image's shape in order to be created. imgs1.set_shape(shape) imgs2.set_shape(shape) # Next we'll centrally crop the image to the size of 100x100. # This operation required explicit knowledge of the image's shape. if shape[0] > shape[1]: rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor), int(crop_shape[1] / crop_factor)] else: rsz_shape = [int(crop_shape[0] / crop_factor), int(shape[1] / shape[0] * crop_shape[1] / crop_factor)] rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1]) rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1]) crops1 = (tf.image.resize_image_with_crop_or_pad( rszs1, crop_shape[0], crop_shape[1]) if crop_shape is not None else imgs1) crops2 = (tf.image.resize_image_with_crop_or_pad( rszs2, crop_shape[0], crop_shape[1]) if crop_shape is not None else imgs2) min_after_dequeue = len(files1) // 5 capacity = min_after_dequeue + (n_threads + 1) * batch_size batch = tf.train.shuffle_batch([crops1, crops2], enqueue_many=False, batch_size=batch_size, capacity=capacity, min_after_dequeue=min_after_dequeue, num_threads=n_threads, seed=seed) return batch def montage_2(images, saveto=None): """Draw all images as a montage separated by 1 pixel borders. Also saves the file to the destination specified by `saveto`. Parameters ---------- images : numpy.ndarray Input array to create montage of. Array should be: batch x height x width x channels. saveto : str Location to save the resulting montage image. Returns ------- m : numpy.ndarray Montage image. 
""" if isinstance(images, list): images = np.array(images) img_h = images.shape[1] img_w = images.shape[2] n_plots = int(np.ceil(np.sqrt(images.shape[0]))) if len(images.shape) == 4 and images.shape[3] == 3: m = np.ones( (images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5 else: m = np.ones( (images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots + n_plots + 1)) * 0.5 for i in range(n_plots): for j in range(n_plots): this_filter = i * n_plots + j if this_filter < images.shape[0]: this_img = images[this_filter] m[1 + i + i * img_h:1 + i + (i + 1) * img_h, 1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img if saveto: # dja plt.imsave(arr=m, fname=saveto) return m def get_some_files(path): fs = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')] fs=sorted(fs) return fs print("Loading files...") filesX = get_some_files("../session-1/img_align_celeba/") # image set 1 (color) filesY = get_some_files("../session-1/img_align_celeba_n/") # matching set 2 (b/w) from libs.dataset_utils import create_input_pipeline batch_size = 8 input_shape = [218, 178, 3] crop_shape = [64, 64, 3] crop_factor = 0.8 #seed=15 # not really necessary? seed=None TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S") n_plots=2 n_bats=3 def runtest(sess, batch, idt): mntgset=[] for bat in range(n_bats): mntg=[] batres = sess.run(batch) batch_xs1=np.array(batres[0]) batch_xs2=np.array(batres[1]) for imn in range(batch_size): img1=batch_xs1[imn] / 255.0 # color image img2=batch_xs2[imn] / 255.0 # matching b/n image mntg.append(img1) mntg.append(img2) mntgset.append(mntg) return mntgset batch = create_input_pipeline_2( files1=filesX, files2=filesY, batch_size=batch_size, crop_shape=crop_shape, crop_factor=crop_factor, shape=input_shape, seed=seed) sess = tf.Session() coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # First obtain all the images, plot none. smpls=[] for i in range(n_plots): mset=runtest(sess, batch, i) smpls.append(mset) # Now plot all the images at the same time. for i in range(n_plots): fig, axs = plt.subplots(1, 3, figsize=(9, 3)) for bat in range(n_bats): mimg=[] for imn in range(batch_size*2): mimg.append(smpls[i][bat][imn]) m=montage_2(mimg) axs[bat].imshow(m) axs[bat].set_title("batch #"+str(bat)) plt.savefig("tmp/y9b_"+str(i)+"_"+TID+".png", bbox_inches="tight") plt.show() # eop
apache-2.0
-5,814,980,172,684,987,000
28.311927
98
0.61252
false
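A NumPy-only sketch of the idea behind the paired input pipeline in testyida9b.py above: keep two file lists aligned (for example colour and black/white versions of the same images) and draw shuffled batches reproducibly from a seed. It deliberately leaves out the TensorFlow queue machinery, and the file names and batch size below are made up for the example.

import numpy as np


def paired_batches(files_x, files_y, batch_size, seed=None):
    """Yield matching (x, y) filename batches in one shared shuffled order."""
    assert len(files_x) == len(files_y), "the two lists must stay aligned"
    order = np.random.default_rng(seed).permutation(len(files_x))
    for start in range(0, len(order), batch_size):
        idx = order[start:start + batch_size]
        yield [files_x[i] for i in idx], [files_y[i] for i in idx]


if __name__ == "__main__":
    xs = ["img_%03d.jpg" % i for i in range(10)]      # hypothetical colour set
    ys = ["img_%03d_bw.jpg" % i for i in range(10)]   # hypothetical b/w set
    for batch_x, batch_y in paired_batches(xs, ys, batch_size=4, seed=15):
        print(batch_x, batch_y)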
wenleix/EdgePPR
src/python/deim-answerquery.py
1
2704
# DEIM import sys import os from sys import argv import numpy as np import time import scipy.sparse import numpy.linalg from scipy.sparse import csc_matrix import pprCommon if len(argv) != 9: print 'Usage %s [CSC-Prefix] [iset.txt] [isetSize] [U.npy] [Dim] [Param.txt] [TruthPrefix] [NumPaper]' % (argv[0]) sys.exit(1) cscPrefix = argv[1] isetFname = argv[2] isetSize = int(argv[3]) basisFname = argv[4] dim = int(argv[5]) paramFname = argv[6] pprPrefix = argv[7] numPaper = int(argv[8]) # Get interpolation set Iset = pprCommon.LoadIset(isetFname)[:isetSize] # Load the basis U = np.load(basisFname)[:, :dim].astype(float) (n, _) = U.shape # Add the following line due to a performance issue with early version of NumPy, see # http://mail.scipy.org/pipermail/scipy-user/2014-May/035694.html U = U.flatten().reshape(U.shape) # Load P_i and prepare for P[Iset, :] Pi = [] WtPi = [] for i in range(7): fname = '%s%d.bin' % (cscPrefix, i) _Pi = pprCommon.ReadBinCscMat(fname).tocsr() Pi.append(_Pi) WtPi.append(_Pi[Iset, :]) Wt = scipy.sparse.identity(n, format='csr')[Iset, :] WtU = U[Iset, :] # Get b b = np.zeros(n) b[:numPaper] = 0.15 Wtb = b[Iset] paramFile = open(paramFname, 'r') idx = 0 for line in paramFile.readlines(): if line == '\n': continue w = [float(s) for s in line.split(' ')] # Form the reduced system tic = time.time() WtPU = np.zeros(WtU.shape) for i in range(len(w)): WtPU += w[i] * (WtPi[i].dot(U)) WtMU = WtU - 0.85 * WtPU toc = time.time() print >> sys.stdout, "PROFILE: Reduced system formed, %.4f sec elapsed." % (toc - tic) # Reconstruct the PageRank vector tic = time.time() if isetSize > dim: (y, _, _, _) = numpy.linalg.lstsq(WtMU, Wtb) else: y = numpy.linalg.solve(WtMU, Wtb) toc = time.time() print >> sys.stdout, "PROFILE: Reduced system solved, %.4f sec elapsed." % (toc - tic) # Reconstruct the PageRank vector tic = time.time() x = U.dot(y) toc = time.time() print >> sys.stdout, "PROFILE: PageRank vector reconstructed, %.4f sec elapsed." % (toc - tic) # Evaluation xexact = pprCommon.LoadBinVec("%s-%d.bin" % (pprPrefix, idx)) delta = x - xexact print >> sys.stdout, 'EVAL: NL1 =', numpy.linalg.norm(delta, 1) / numpy.linalg.norm(xexact, 1) idx1 = sorted(np.array(range(len(xexact))), key=lambda idx: xexact[idx], reverse=True) idx2 = sorted(np.array(range(len(x))), key=lambda idx: x[idx], reverse=True) print >> sys.stdout, 'EVAL: KendallTau@100 =', pprCommon.KendallTau(idx1, idx2, 100) sys.stdout.flush() idx += 1 paramFile.close()
apache-2.0
4,050,262,296,371,785,000
25.509804
118
0.621672
false
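A small NumPy sketch of the reduced-system step used in deim-answerquery.py above: restrict M x = b to a set of interpolation rows, solve the small system for the coefficients y in the basis U, and reconstruct x as U y, falling back to least squares when there are more rows than basis vectors. The matrices, sizes and names below are invented for the illustration.

import numpy as np


def reduced_solve(M, b, U, rows):
    """Approximate the solution of M x = b within the column space of U."""
    WtMU = M[rows, :] @ U          # reduced operator, shape (len(rows), dim)
    Wtb = b[rows]                  # reduced right-hand side
    if len(rows) > U.shape[1]:
        y, *_ = np.linalg.lstsq(WtMU, Wtb, rcond=None)
    else:
        y = np.linalg.solve(WtMU, Wtb)
    return U @ y


if __name__ == "__main__":
    rng = np.random.default_rng(1)
    n, dim = 200, 20
    M = np.eye(n) + 0.01 * rng.normal(size=(n, n))
    U, _, _ = np.linalg.svd(rng.normal(size=(n, dim)), full_matrices=False)
    x_true = U @ rng.normal(size=dim)          # a solution that lives in span(U)
    b = M @ x_true
    rows = rng.choice(n, size=30, replace=False)
    x_red = reduced_solve(M, b, U, rows)
    print("relative error:", np.linalg.norm(x_red - x_true) / np.linalg.norm(x_true))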
huzq/scikit-learn
sklearn/linear_model/tests/test_least_angle.py
1
28898
import warnings import numpy as np import pytest from scipy import linalg from sklearn.base import clone from sklearn.model_selection import train_test_split from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_raises from sklearn.utils._testing import ignore_warnings from sklearn.utils._testing import assert_warns from sklearn.utils._testing import TempMemmap from sklearn.utils.fixes import np_version, parse_version from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model._least_angle import _lars_path_residues from sklearn.linear_model import LassoLarsIC, lars_path from sklearn.linear_model import Lars, LassoLars # TODO: use another dataset that has multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle of Lars is to keep covariances tied and decreasing # also test verbose output from io import StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert len(output1) == len(output2) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy gives the right answer G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def test_lars_lstsq(): # Test that Lars gives least square solution at the end # of the path X1 = 3 * X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value change when numpy >= 1.14 rcond = None if np_version >= parse_version('1.14') else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution at the end # of the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust to collinearity in input X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert (residual ** 2).sum() < 1. # just make sure it's bounded n_samples = 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns the correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and Xy remains # correct X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input is a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks that LARS Lasso is handling rank # deficient input data (with n_features < rank) in the same way # as coordinate descent Lasso y = [5, 0, 5] for X in ( [[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]] ): # To be able to use the coefs to compute the objective function, # we need to turn off 
normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert obj_lars < obj_cd * (1. + 1e-8) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate descent give the # same results. X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert error < 0.01 # similar test, with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert err < 1e-3 # same test, with normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert error < 0.01 def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate descent give the # same results when early stopping is used. # (test : before, in the middle, and in the last part of the path) alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert error < 0.01 # same test, with normalization for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert error < 0.01 def test_lasso_lars_path_length(): # Test that the path length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design, and check that # it does not blow up, and stays somewhat close to a solution given # by the coordinate descent solver # Also test that lasso_path (using lars_path output style) gives # the same result as lars_path and previous lasso output style # under these conditions. 
rng = np.random.RandomState(42) # Generate data n, m = 70, 100 k = 5 X = rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS has to go # far in the path to converge, and check that LARS and coordinate # descent give the same answers # Note it used to be the case that Lars had to use the drop for good # strategy for this but this is no longer the case with the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y = [10, 10, 1] alpha = .0001 def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert lars_obj < cd_obj * (1. + 1e-8) def test_lars_add_features(): # assure that at least some features get added if necessary # test for 6d2b4c # Hilbert matrix n = 5 H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert len(lars.coef_.nonzero()[0]) == 6 # The path should be of length 6 + 1 in a Lars going down to 6 # non-zero coefs assert len(lars.alphas_) == 7 @ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional y do the right thing Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) def test_lars_cv(): # Test the LassoLarsCV object by checking that the optimal alpha # increases as the number of samples increases. # This property is not actually guaranteed in general and is just a # property of the given dataset, with the given steps chosen. 
old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') def test_lars_cv_max_iter(recwarn): warnings.simplefilter('always') with np.errstate(divide='raise', invalid='raise'): X = diabetes.data y = diabetes.target rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5) lars_cv.fit(X, y) # Check that there is no warning in general and no ConvergenceWarning # in particular. # Materialize the string representation of the warning to get a more # informative error message in case of AssertionError. recorded_warnings = [str(w) for w in recwarn] assert recorded_warnings == [] def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. # - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert lars_bic.alpha_ > lars_aic.alpha_ assert len(nonzero_bic) < len(nonzero_aic) assert np.max(nonzero_bic) < diabetes.data.shape[1] # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated memory mapping on large input, the # fold data is in read-only mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) def test_lars_path_positive_constraint(): # this is the main test for the positive parameter on the lars_path method # the estimator classes just make use of this function # we do the test on the diabetes dataset # ensure that we get negative coefficients when positive=False # and all positive when positive=True # for method 'lar' (default) and lasso err_msg = "Positive constraint not supported for 'lar' coding method." 
with pytest.raises(ValueError, match=err_msg): linear_model.lars_path(diabetes['data'], diabetes['target'], method='lar', positive=True) method = 'lasso' _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now we gonna test the positive option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of all estimator # classes in this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate descent give the # same results when using the positive option # This test is basically a copy of the above with additional positive # option. However for the middle part, the comparison of coefficient values # for a range of alphas, we had to make an adaptations. See below. # not normalized data X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert error < 0.01 # The range of alphas chosen for coefficient comparison here is restricted # as compared with the above test without the positive option. This is due # to the circumstance that the Lars-Lasso algorithm does not converge to # the least-squares-solution for small alphas, see 'Least Angle Regression' # by Efron et al 2004. The coefficients are typically in congruence up to # the smallest alpha reached by the Lars-Lasso algorithm and start to # diverge thereafter. 
See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert err < 1e-3 # normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert error < 0.01 def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with the LassoLars # implementation available in R (lars library) under the following # scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True # Let's generate the data used in the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's compare R vs sklearn when fit_intercept=False and # normalize=False ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R vs sklearn when fit_intercept=True and # normalize=True # # Note: When normalize is equal to True, R returns the coefficients in # their original units, that is, they are rescaled back, whereas sklearn # does not do that, therefore, we need to do this step before comparing # their results. 
########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by sklearn before comparing # against the R result (read the note above) temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): """ Test that user input regarding copy_X is not being overridden (it was until at least version 0.21) """ lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): """ Test that user input to .fit for copy_X overrides default __init__ value """ lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('est', (LassoLars(alpha=1e-3), Lars())) def test_lars_with_jitter(est): # Test that a small amount of jitter helps stability, # using example provided in issue #2746 X = np.array([[0.0, 0.0, 0.0, -1.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0]]) y = [-2.5, -2.5] expected_coef = [0, 2.5, 0, 2.5, 0] # set to fit_intercept to False since target is constant and we want check # the value of coef. coef would be all zeros otherwise. est.set_params(fit_intercept=False) est_jitter = clone(est).set_params(jitter=10e-8, random_state=0) est.fit(X, y) est_jitter.fit(X, y) assert np.mean((est.coef_ - est_jitter.coef_)**2) > .1 np.testing.assert_allclose(est_jitter.coef_, expected_coef, rtol=1e-3) def test_X_none_gram_not_none(): with pytest.raises(ValueError, match="X cannot be None if Gram is not None"): lars_path(X=None, y=[1], Gram='not None')
bsd-3-clause
-8,861,591,846,023,694,000
36.923885
79
0.59101
false
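A compact version of the comparison that test_least_angle.py above performs many times: LassoLars (exact LARS path) and Lasso (coordinate descent) solve the same penalised problem, so their coefficients should agree closely. The synthetic data, alpha and tolerance are arbitrary choices for this sketch; fit_intercept=False keeps the two estimators directly comparable across scikit-learn versions.

import numpy as np
from sklearn.linear_model import Lasso, LassoLars

rng = np.random.RandomState(42)
X = rng.randn(60, 8)
coef_true = np.zeros(8)
coef_true[:3] = [1.5, -2.0, 0.7]                 # a sparse ground truth
y = X @ coef_true + 0.01 * rng.randn(60)

alpha = 0.1
lars = LassoLars(alpha=alpha, fit_intercept=False).fit(X, y)
cd = Lasso(alpha=alpha, fit_intercept=False, tol=1e-8).fit(X, y)

print("LARS coefficients:", np.round(lars.coef_, 4))
print("CD coefficients:  ", np.round(cd.coef_, 4))
print("difference (L2 norm):", np.linalg.norm(lars.coef_ - cd.coef_))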
maxscheurer/pycontact
PyContact/gui/MainWindow.py
1
22094
import multiprocessing import warnings import copy import sys import PyQt5.QtCore as QtCore from PyQt5.QtCore import pyqtSlot, QObject from PyQt5.QtWidgets import (QMainWindow, QTabWidget, QLabel, QDialog, QApplication, QGridLayout, QFileDialog, QWidget) from PyQt5.Qt import Qt from PyQt5.QtGui import QIntValidator from PyQt5.QtSvg import QSvgGenerator import numpy as np from . import MainQtGui from .SasaWidgets import SasaWidget from .Canvas import Canvas from .Dialogues import FileLoaderDialog, AnalysisDialog from .ExportTabWidget import ExportTabWidget from .Statistics import Statistics from .Plotters import * from ..core.ContactAnalyzer import * from .ErrorBox import ErrorBox from .ErrorMessages import ErrorMessages from ..core.LogPool import * from . import Preferences from ..exampleData.datafiles import DEFAULTSESSION, DEFAULTSESSION_PY3 from .VMDControlPanel import VMDControlPanel from ..core.DataHandler import DataHandler multiprocessing.log_to_stderr() np.set_printoptions(threshold=np.inf) with warnings.catch_warnings(): warnings.simplefilter("ignore") class MainWindow(QMainWindow, MainQtGui.Ui_MainWindow, QObject): """PyContact Application Main Window with timeline.""" def closeEvent(self, event): """Closing application when Exit on MainWindow is clicked.""" event.accept() QApplication.quit() def __init__(self, parent=None): self.config = None self.analysis = None self.maps = None super(MainWindow, self).__init__(parent) self.contacts = [] self.filteredContacts = [] self.setupUi(self) self.setWindowTitle("PyContact") # painter contains both labels and frame boxes for drawing self.painter = Canvas() self.scrollArea.setWidget(self.painter) self.scrollArea.horizontalScrollBar().valueChanged.connect(self.horizontalScrollBarChanged) self.actionExportData.triggered.connect(self.pushExport) self.exportContactDataButton.clicked.connect(self.pushExport) self.actionLoad_Data.triggered.connect(self.loadDataPushed) self.actionExport_Session.triggered.connect(self.exportSession) self.actionImport_Session.triggered.connect(self.importSession) self.actionShow_Info.triggered.connect(self.showDeveloperInfo) # settings and filters self.settingsView = PreferencesWidget() self.settingsView.applySettingsButton.clicked.connect(self.updateSettings) self.applyFilterButton.clicked.connect(self.updateFilters) # statistics self.statisticsButton.clicked.connect(self.showStatistics) # frames stride posIntValidator = QIntValidator() posIntValidator.setBottom(1) self.frameStrideField.setValidator(posIntValidator) # analysis button self.analysisButton.clicked.connect(self.analyzeDataPushed) # contact area button self.actionContact_Area_Calculations.triggered.connect(self.showContactAreaView) # preferences self.actionPreferences.triggered.connect(self.openPrefs) # apply color button, outdated? 
self.colorScheme = ColorScheme.bbsc self.actionDefault.triggered.connect(self.loadDefault) self.currentSelection1 = "-" self.currentSelection2 = "-" # setup of extra widgets self.exportWidget = ExportTabWidget() self.sasaView = SasaWidget() self.statisticsView = None self.analysis_state = False self.vismode = False self.visModeButton.setCheckable(True) self.visModeButton.setChecked(False) self.visModeButton.clicked.connect(self.switchedToVisMode) self.vmdpanel = VMDControlPanel() self.actionVMD_Remote_Control.triggered.connect(self.showVMDControlPanel) self.painter.clickedRowSignal.connect(self.updateVMDSelections) self.painter.clickedColumnSignal.connect(self.updateVMDFrame) self.updateSettings() self.updateFilters() # self.tableTest = Widget() # self.tableTest.setGeometry(100, 100, 400, 400) # self.tableTest.show() # from ..db.DbReader import read_residue_db_all # res = read_residue_db_all() # residueList = [] # 1st: name, 2nd scpolarity # for k in res: # residueList.appen([k["name"], k["scpolarity"]]) self.actionDefault.setText("Load sample data") def horizontalScrollBarChanged(self): x = self.scrollArea.horizontalScrollBar().value() y = self.painter.labelView.y() self.painter.labelView.move(x, y) def showVMDControlPanel(self): """Shows the VMD control panel, to remotely access VMD from PyContact.""" self.vmdpanel.show() def showContactAreaView(self): """Shows the SASA computation panel.""" self.sasaView.nsPerFrame = float(self.settingsView.nsPerFrameField.text()) self.sasaView.show() if self.analysis: self.sasaView.setFilePaths(self.analysis.getFilePaths()) def switchedToVisMode(self): """Switch to vis mode, to show selected contacts directly in VMD.""" if self.visModeButton.isChecked(): self.vismode = True # conversions with clicked frames are not allowed self.frameStrideField.setText("1") else: self.vismode = False self.painter.switchToVisMode(self.vismode) self.updateSettings() self.updateFilters() @pyqtSlot() def updateVMDSelections(self): """Updates the selected contact in VMD via the vmd panel.""" if self.vmdpanel.connected: self.vmdpanel.updateSelections(self.analysis.sel1text, self.analysis.sel2text, [self.filteredContacts[self.painter.globalClickedRow]]) @pyqtSlot() def updateVMDFrame(self): """Updates the selected frame in VMD via the vmd panel.""" if self.vmdpanel.connected: self.vmdpanel.gotoVMDFrame(self.painter.clickedColumn) def updateSelectionLabels(self, sel1, sel2): """Updates the current selection in the info labels.""" self.currentSelection1 = sel1 self.currentSelection2 = sel2 self.selection1label.setText(sel1) self.selection2label.setText(sel2) def importSession(self): """Imports a saved session from file.""" fnames = QFileDialog.getOpenFileNames(self, "Open file") importfile = "" for f in fnames[0]: importfile = f break if importfile == "" or len(fnames) == 0: return self.contacts, arguments, trajArgs, self.maps, contactResults = DataHandler.importSessionFromFile(importfile) self.analysis = Analyzer(*arguments) self.analysis.contactResults = contactResults self.analysis.setTrajectoryData(*trajArgs) self.analysis.finalAccumulatedContacts = self.contacts self.sasaView.setFilePaths(*self.analysis.getFilePaths()) self.exportWidget.setFilePaths(*self.analysis.getFilePaths()) self.updateSelectionLabels(arguments[5], arguments[6]) self.updateSettings() self.updateFilters() def exportSession(self): """Exports the current session to file.""" fileName = QFileDialog.getSaveFileName(self, 'Export file') filestring = fileName[0] if filestring == "": return if self.contacts is 
not None and self.analysis is not None: self.setInfoLabel("Exporting current session...") DataHandler.writeSessionToFile(filestring, self.analysis) self.cleanInfoLabel() else: box = ErrorBox(ErrorMessages.NOEXPDATA) box.exec_() return def loadDefault(self): """Loads the default session.""" if (sys.version_info > (3, 0)): self.contacts, arguments, trajArgs, self.maps, contactResults = DataHandler.importSessionFromFile(DEFAULTSESSION_PY3) else: self.contacts, arguments, trajArgs, self.maps, contactResults = DataHandler.importSessionFromFile(DEFAULTSESSION) self.analysis = Analyzer(*arguments) self.analysis.contactResults = contactResults self.analysis.setTrajectoryData(*trajArgs) self.analysis.finalAccumulatedContacts = self.contacts self.sasaView.setFilePaths(*self.analysis.getFilePaths()) self.exportWidget.setFilePaths(*self.analysis.getFilePaths()) self.updateSelectionLabels(arguments[5], arguments[6]) self.updateSettings() self.updateFilters() def loadDataPushed(self): """Loads the trajectory data with the chosen initial parameters.""" self.config, result = FileLoaderDialog.getConfig() if result == 1: QApplication.processEvents() self.setInfoLabel("Loading trajectory and running atomic contact analysis...") nproc = int(self.settingsView.coreBox.value()) self.analysis = Analyzer(self.config.psf, self.config.dcd, self.config.cutoff, self.config.hbondcutoff, self.config.hbondcutangle, self.config.sel1text, self.config.sel2text) QApplication.processEvents() try: self.analysis.runFrameScan(nproc) except: box = ErrorBox("Error while loading data: Probably you specified an atom selection with 0 atoms or invalid input files.") box.exec_() self.loadDataPushed() self.setInfoLabel("%d frames loaded." % len(self.analysis.contactResults)) self.updateSelectionLabels(self.config.sel1text, self.config.sel2text) self.sasaView.setFilePaths(*self.analysis.getFilePaths()) self.exportWidget.setFilePaths(*self.analysis.getFilePaths()) @pyqtSlot(float) def updateAnalyzedFrames(self, value): """Handles the progress bar update.""" self.progressBar.setValue(100 * value) QApplication.processEvents() def setInfoLabel(self, txt): """Sets the Info label text.""" self.statusLabel.setText(txt) def cleanInfoLabel(self): """Clears the Info label text.""" self.setInfoLabel("-") def analyzeDataPushed(self): """Handles the Analyzer after the Accumulation maps have been set.""" if self.analysis is None: box = ErrorBox(ErrorMessages.NODATA_PROMPTLOAD) box.exec_() return self.maps, result = AnalysisDialog.getMapping() if result == 1: self.analysis.frameUpdate.connect(self.updateAnalyzedFrames) self.setInfoLabel("Analyzing contacts...") map1 = self.maps[0] map2 = self.maps[1] nproc = int(self.settingsView.coreBox.value()) self.contacts = self.analysis.runContactAnalysis(map1, map2, nproc) self.progressBar.setValue(0) self.setInfoLabel("Updating timeline...") QApplication.processEvents() self.updateSettings() self.updateFilters() self.cleanInfoLabel() def updateSettings(self): """Updates the settings chosen from the settings view.""" self.painter.nsPerFrame = float(self.settingsView.nsPerFrameField.text()) self.painter.threshold = float(self.settingsView.thresholdField.text()) self.painter.rendered = False self.painter.colorScheme = self.colorScheme # self.painter.customColor = self.customColor self.painter.repaint() self.painter.update() self.sasaView.nsPerFrame = float(self.settingsView.nsPerFrameField.text()) def updateFilters(self): """Updates the chosen filters in MainWindow.""" if self.vismode is True: 
self.frameStrideField.setText("1") stride = int(self.frameStrideField.text()) if stride < 1: stride = 1 QApplication.processEvents() self.frameStrideField.setText(str(stride)) self.painter.merge = stride self.painter.labelView.clean() self.painter.showHbondScores = False # total time filter totalTimeActive = self.activeTotalTimeCheckbox.isChecked() scoreActive = self.activeScoreCheckbox.isChecked() sortingActive = self.activeSortingBox.isChecked() onlyActive = self.onlyBoxActiveCheckbox.isChecked() filterActive = (totalTimeActive or scoreActive or sortingActive or onlyActive) weightActive = False # only filter given range rangeFilterActive = self.filterRangeCheckbox.isChecked() if len(self.contacts) > 0: lower = int(self.lowerRangeField.text()) - 1 upper = self.upperRangeField.text() if upper == "end": upper = len(self.contacts[0].scoreArray) else: upper = int(upper) if lower < 0: lower = 0 self.painter.range = [lower, upper] self.painter.rangeFilterActive = False self.filteredContacts = copy.deepcopy(self.contacts) # residue range filter range_filter = RangeFilter("resrange") self.filteredContacts = range_filter.filterByRange(self.filteredContacts, self.residARangeField.text(), self.residBRangeField.text(), AccumulationMapIndex.resid) self.filteredContacts = range_filter.filterByRange(self.filteredContacts, self.atomAIndexField.text(), self.atomBIndexField.text(), AccumulationMapIndex.index) # aminoacids name filter name_filter = NameFilter("name") self.filteredContacts = name_filter.filterContactsByName(self.filteredContacts, self.residANameField.text(), self.residBNameField.text(), AccumulationMapIndex.resname) self.filteredContacts = name_filter.filterContactsByName(self.filteredContacts, self.atomANameField.text(), self.atomBNameField.text(), AccumulationMapIndex.name) # range filter if rangeFilterActive: self.painter.rangeFilterActive = True frameRangeFilter = FrameFilter("framer") self.filteredContacts = frameRangeFilter.extractFrameRange(self.filteredContacts, [lower, upper]) for c in self.filteredContacts: c.setScores() c.setContactType() # weight functions if weightActive: if self.currentFunctionType == FunctionType.sigmoid: x0 = float(self.sigX0Field.text()) L = float(self.sigLField.text()) k = float(self.sigKField.text()) y0 = float(self.sigY0Field.text()) sig = SigmoidWeightFunction("sig", np.arange(0, len(self.contacts[0].scoreArray), 1), x0, L, k, y0) self.filteredContacts = sig.weightContactFrames(self.filteredContacts) elif self.currentFunctionType == FunctionType.rect: x0 = float(self.rectX0Field.text()) x1 = float(self.rectX1Field.text()) h = float(self.rectHField.text()) y0 = float(self.rectY0Field.text()) rect = RectangularWeightFunction("rect", np.arange(0, len(self.contacts[0].scoreArray), 1), x0, x1, h, y0) self.filteredContacts = rect.weightContactFrames(self.filteredContacts) elif self.currentFunctionType == FunctionType.linear: y0 = float(self.linY0Field.text()) y1 = float(self.linY1Field.text()) lin = LinearWeightFunction("rect", np.arange(0, len(self.contacts[0].scoreArray), 1), y0, y1) self.filteredContacts = lin.weightContactFrames(self.filteredContacts) # other filters if filterActive: if totalTimeActive: operator = self.compareTotalTimeDropdown.currentText() value = float(self.totalTimeField.text()) filter = TotalTimeFilter("tottime", operator, value) self.filteredContacts = filter.filterContacts(self.filteredContacts) if scoreActive: operator = self.compareScoreDropdown.currentText() value = float(self.scoreField.text()) filter = ScoreFilter("score", 
operator, value, self.meanDropdown.currentText()) self.filteredContacts = filter.filterContacts(self.filteredContacts) if sortingActive: key = self.sortingKeyDropdown.currentText() descending = SortingOrder.mapping[self.sortingOrderDropdown.currentText()] sorter = Sorting("sorting", key, descending) sorter.setThresholdAndNsPerFrame(float(self.settingsView.thresholdField.text()), float(self.settingsView.nsPerFrameField.text())) self.filteredContacts = sorter.sortContacts(self.filteredContacts) if onlyActive: key = self.selectOnlyToolbox.currentText() only = OnlyFilter("only", key, 0) self.filteredContacts = only.filterContacts(self.filteredContacts) if key == "hbonds": self.painter.showHbondScores = True self.painter.contacts = self.filteredContacts self.painter.rendered = False self.painter.repaint() self.painter.update() if len(self.filteredContacts) == 0: self.painter.labelView.clean() else: # no weight or filters self.painter.showHbondScores = False self.painter.contacts = self.filteredContacts self.painter.rendered = False self.painter.repaint() self.painter.update() # Update data for export self.exportWidget.setContacts(self.filteredContacts) if self.maps is not None: self.exportWidget.setMaps(self.maps[0], self.maps[1]) self.exportWidget.setMapLabels(self.analysis.sel1text, self.analysis.sel2text) self.vmdpanel.sel1 = self.analysis.sel1text self.vmdpanel.sel2 = self.analysis.sel2text self.vmdpanel.filteredContactList = self.filteredContacts self.exportWidget.setThresholdAndNsPerFrame(self.painter.threshold, self.painter.nsPerFrame) def openPrefs(self): """Opens the preferences panel.""" self.settingsView.show() def showStatistics(self): """Shows general statistics of the analyzed data over all frames.""" if len(self.contacts) == 0 or self.contacts is None: box = ErrorBox(ErrorMessages.NOSCORES_PROMPTANALYSIS) box.exec_() return self.statisticsView = Statistics(self.contacts, float(self.settingsView.nsPerFrameField.text())) self.statisticsView.showNormal() def showDeveloperInfo(self): """Shows information about the contributing authors.""" d = QDialog() grid = QGridLayout() d.setLayout(grid) info = QLabel("Developers: Maximilian Scheurer and Peter Rodenkirch") info2 = QLabel("") mail = QLabel("Contact: mscheurer@ks.uiuc.edu, rodenkirch@stud.uni-heidelberg.de") copyright = QLabel("Version 1.0.4") grid.addWidget(info, 0, 0) grid.addWidget(info2, 1, 0) grid.addWidget(mail, 2, 0) grid.addWidget(copyright, 3, 0) d.setWindowTitle("Developer Info") d.setFixedSize(500,150) d.setWindowModality(Qt.ApplicationModal) d.exec_() def pushExport(self): """Opens the export panel.""" self.exportWidget.valueUpdated.connect(self.handleExportUpdate) self.exportWidget.setContacts(self.filteredContacts) if self.maps is not None: self.exportWidget.setMaps(self.maps[0], self.maps[1]) self.exportWidget.setMapLabels(self.analysis.sel1text, self.analysis.sel2text) self.exportWidget.setThresholdAndNsPerFrame(self.painter.threshold, self.painter.nsPerFrame) self.exportWidget.show() @QtCore.Slot(str, str) def handleExportUpdate(self, fileName, fileType): """Handles the paint event after the export of the current view has been initiated.""" if fileType == "PNG": if len(fileName) > 0: currentView = self.painter.grab() currentView.save(fileName) elif fileType == "SVG": if len(fileName) > 0: generator = QSvgGenerator() generator.setFileName(fileName) generator.setSize(self.painter.size()) generator.setViewBox(self.painter.rect()) self.painter.renderContact(generator) self.painter.rendered = False self.painter.repaint() 
self.painter.update() class PreferencesWidget(QTabWidget, Preferences.Ui_PreferencesPanel): """Defines the preferences panel""" def __init__(self, parent=None): super(QWidget, self).__init__(parent) self.setupUi(self) class ColorScheme: custom, bbsc = range(2)
gpl-3.0
-4,896,590,700,105,944,000
43.724696
137
0.626822
false
kauser-cse-buet/CerebralCortex
cerebralcortex/data_processor/signalprocessing/vector.py
1
4899
# Copyright (c) 2017, MD2K Center of Excellence # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import datetime from typing import List import numpy as np from numpy.linalg import norm from sklearn import preprocessing from cerebralcortex.kernel.datatypes.datapoint import DataPoint from cerebralcortex.kernel.datatypes.datastream import DataStream def normalize(datastream: DataStream) -> DataStream: """ :param datastream: :return: """ input_data = np.array([i.sample for i in datastream.data]) data = preprocessing.normalize(input_data, axis=0) result_data = [DataPoint.from_tuple(start_time=v.start_time, sample=data[i]) for i, v in enumerate(datastream.data)] result = DataStream.from_datastream(input_streams=[datastream]) result.data = result_data return result def magnitude(datastream: DataStream) -> DataStream: """ :param datastream: :return: """ input_data = np.array([i.sample for i in datastream.data]) data = norm(input_data, axis=1).tolist() # TODO: Fix function to not compute normalized magnitudes result_data = [DataPoint.from_tuple(start_time=v.start_time, sample=data[i]) for i, v in enumerate(datastream.data)] result = DataStream.from_datastream(input_streams=[datastream]) result.data = result_data return result def smooth(data: List[DataPoint], span: int = 5) -> List[DataPoint]: """ Smooths data using moving average filter over a span. 
The first few elements of data_smooth are given by data_smooth(1) = data(1) data_smooth(2) = (data(1) + data(2) + data(3))/3 data_smooth(3) = (data(1) + data(2) + data(3) + data(4) + data(5))/5 data_smooth(4) = (data(2) + data(3) + data(4) + data(5) + data(6))/5 for more details follow the below links: https://www.mathworks.com/help/curvefit/smooth.html http://stackoverflow.com/a/40443565 :return: data_smooth :param data: :param span: """ sample = [i.sample for i in data] sample_middle = np.convolve(sample, np.ones(span, dtype=int), 'valid') / span divisor = np.arange(1, span - 1, 2) sample_start = np.cumsum(sample[:span - 1])[::2] / divisor sample_end = (np.cumsum(sample[:-span:-1])[::2] / divisor)[::-1] sample_smooth = np.concatenate((sample_start, sample_middle, sample_end)) data_smooth = [] if len(sample_smooth) == len(data): for i, item in enumerate(data): dp = DataPoint.from_tuple(sample=sample_smooth[i], start_time=item.start_time, end_time=item.end_time) data_smooth.append(dp) else: raise Exception("Smoothed data length does not match with original data length.") return data_smooth def moving_average_curve(data: List[DataPoint], window_length: int) -> List[DataPoint]: """ Moving average curve from filtered (using moving average) samples. :return: mac :param data: :param window_length: """ sample = [i.sample for i in data] mac = [] for i in range(window_length, len(sample) - (window_length + 1)): sample_avg = np.mean(sample[i - window_length:i + window_length + 1]) mac.append(DataPoint.from_tuple(sample=sample_avg, start_time=data[i].start_time, end_time=data[i].end_time)) return mac def window_std_dev(data: List[DataPoint], window_start: datetime) -> DataPoint: """ :param data: :param window_start: :return: """ data_points = np.array([dp.sample for dp in data]) return DataPoint.from_tuple(window_start, np.std(data_points))
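# A minimal numeric sketch of the edge handling described in the smooth()
# docstring above (illustrative only; plain floats are used instead of
# DataPoint objects, and the input values are made up).
if __name__ == '__main__':
    sample = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    span = 5
    middle = np.convolve(sample, np.ones(span, dtype=int), 'valid') / span
    divisor = np.arange(1, span - 1, 2)                    # [1, 3]
    start = np.cumsum(sample[:span - 1])[::2] / divisor    # [1.0, 2.0]
    end = (np.cumsum(sample[:-span:-1])[::2] / divisor)[::-1]
    smoothed = np.concatenate((start, middle, end))
    # Smoothing a linear ramp reproduces the ramp: [1. 2. 3. 4. 5. 6. 7.]
    assert np.allclose(smoothed, sample)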
bsd-2-clause
3,324,807,605,425,304,000
34.759124
117
0.685446
false
shujingke/opencog
experiments/attention/plot.py
7
2404
#!/usr/bin/python import sys import csv import matplotlib.pyplot as plt import numpy as np import time as t import re cont = True; while cont: cont = False f = open("dump-av.data",'r') lines = csv.reader(f) line0 = lines.__next__() groups = int(line0[0]) groupsize = int(line0[1]) data = (np.zeros([groups+2,groupsize,2,0])).tolist() current_group = [[],[]] colors = ['r','g','b','k','c','m'] for line in lines: data[groups+1][0][0].append(float(line[6])) data[groups+1][0][1].append(float(line[4])) current_group[0].append(float(line[5])) current_group[1].append(float(line[4])) if (line[0].startswith("group")): ints = [int(s) for s in re.findall(r'\d+', line[0])] group = ints[0] word = ints[1] data[group][word][0].append(float(line[1])) data[group][word][1].append(float(line[4])) elif (line[0].startswith("non")): word = int(line[0][10:]) if word >= groupsize: continue data[groups][word][0].append(float(line[1])) data[groups][word][1].append(float(line[4])) f = open("dump-hebtv.data",'r') lines = csv.reader(f) hebtv = {'null': [[],[]]} for line in lines: #if line[1].startswith("group0"): #and line[2].startswith("group1"): #if line[1][:6] != line[2][:6]: try: if line[0] in hebtv: hebtv[line[0]][0].append(float(line[3])) hebtv[line[0]][1].append(float(line[5])) else: hebtv[line[0]] = [[],[]] hebtv[line[0]][0].append(float(line[3])) hebtv[line[0]][1].append(float(line[5])) except: print(line) fig = plt.figure() #ax1 = fig.add_subplot(211) #ax1.plot(current_group[1],current_group[0],color='y') ax2 = fig.add_subplot(121) for i in range(groups+2): for w in range(groupsize): #if i == groups: # continue clr = plt.cm.jet(1. * i / (groups+1)) ax2.plot(data[i][w][1],data[i][w][0],color=clr) ax3 = fig.add_subplot(122) for key,e in hebtv.items(): ax3.plot(e[1],e[0]) figManager = plt.get_current_fig_manager() figManager.window.showMaximized() plt.show()
agpl-3.0
-3,385,728,861,306,983,000
26.953488
76
0.507903
false
svalenti/pessto
trunk/src/ntt/efosccalibdef.py
1
6561
def makefringingmask(listimg, _output, _interactive, _combine='average', _rejection='avsigclip'): # print "LOGX:: Entering `makefringingmask` method/function in # %(__file__)s" % globals() import ntt from ntt.util import readhdr, readkey3, delete, updateheader import glob import os import sys import re import string from pyraf import iraf iraf.noao(_doprint=0) iraf.immatch(_doprint=0) iraf.imutil(_doprint=0) iraf.nproto(_doprint=0) iraf.proto(_doprint=0) toforget = ['nproto.objmasks', 'proto.fixpix'] for t in toforget: iraf.unlearn(t) if _interactive == True: listimg2 = [] for img in listimg: _exptime = readkey3(readhdr(img), 'exptime') if float(_exptime) >= 10: answ = 'xxx' while answ.lower() not in ['y', 'n', 's', 'a']: iraf.display(img, frame=1, fill='yes') answ = raw_input( 'use this image (yes,no,stop (not more images),all) [[y]/n/s/a] ? ') if not answ: answ = 'y' if answ.lower() == 'y': listimg2.append(img) elif answ.lower() == 'a': listimg2 = listimg[:] if answ.lower() in ['a', 's']: break listimg = listimg2[:] iraf.nproto.objmasks1.fitxord = 1 iraf.nproto.objmasks1.fityord = 1 hdr0 = readhdr(listimg[0]) _date = readkey3(hdr0, 'date-obs') _filter = readkey3(hdr0, 'filter') _exptime = readkey3(hdr0, 'exptime') _instrume = readkey3(hdr0, 'instrume') _ron = readkey3(hdr0, 'ron') _gain = readkey3(hdr0, 'gain') badpixelmask = 'bad_pixel_mask.pl' if not os.path.isfile(badpixelmask): os.system('cp ' + ntt.__path__[0] + '/archive/' + _instrume + '/badpixels/badpixel_20100210.pl ' + badpixelmask) ff = open('_listmask', 'w') hh = open('_listobgz', 'w') for img in listimg: _exptime = readkey3(readhdr(img), 'exptime') hh.write('z_' + img + '\n') ff.write('mask_' + img + '\n') delete('mask_' + img) aaa = iraf.hedit(img, delete='yes', field='OBJMASK', up='yes', verify='no', Stdout=1) aaa = iraf.hedit(img, delete='yes', field='BPM', up='yes', verify='no', Stdout=1) delete('z_' + img) iraf.imutil.imexpr(expr='(a - median(a))/' + str(_exptime), a=img, output='z_' + img, verbose='no') ntt.util.updateheader('z_' + img, 0, {'EXPTIME': [1, '']}) ff.close() hh.close() if not _output: _output = 'fringing_' + str(_date) + '_' + str(_filter) + '.fits' delete(_output) print ' making mask for each frame .......' ccc = iraf.nproto.objmasks(images='@_listobgz', objmasks='@_listmask', omtype='boolean', blksize=-16, convolv='block 3 3', hsigma=5, lsigma=3, minpix=10, ngrow=2, agrow=4., Stdout=1) print 'combining all frames, masking the objects .....' 
iraf.imcombine('@_listobgz', output=_output, masktyp='!OBJMASK', maskval=0, combine=_combine, reject=_rejection, scale='none', statsec='[100:800,100:800]', rdnoise='', gain='', nlow=1, nhigh=1, logfile='imcombinelog') ntt.util.phase3header(_output) ntt.util.updateheader( _output, 0, {'BUNIT': ['ADU', 'pixel units(ADU,electrons)']}) ntt.util.updateheader(_output, 0, {'FILETYPE': [11231, 'fringing frame']}) return _output ########################################################################## def makefringing(listimg, _output, _xorder, _yorder, _interactive, combine='average', rejection='avsigclip'): # print "LOGX:: Entering `makefringing` method/function in %(__file__)s" % # globals() import os import string import re import ntt from ntt.util import readhdr, readkey3, delete, updateheader from pyraf import iraf from pyfits import open as popen from numpy import median, where, mean iraf.noao(_doprint=0) iraf.nproto(_doprint=0) if _interactive == True: listimg2 = [] for img in listimg: _exptime = readkey3(readhdr(img), 'exptime') if float(_exptime) >= 100: answ = 'xxx' while answ.lower() not in ['y', 'n', 's', 'a']: iraf.display(img, frame=1, fill='yes') answ = raw_input( 'use this image (yes,no,stop (not more images),all) [[y]/n/s/a] ? ') if not answ: answ = 'y' if answ.lower() == 'y': listimg2.append(img) elif answ.lower() == 'a': listimg2 = listimg[:] if answ.lower() in ['a', 's']: break listimg = listimg2[:] iraf.nproto.objmasks1.fitxord = 1 iraf.nproto.objmasks1.fityord = 1 listmask = [] ff = open('_listmask', 'w') gg = open('_listobg', 'w') hh = open('_listobgz', 'w') for img in listimg: _exptime = readkey3(readhdr(img), 'exptime') gg.write(img + '\n') hh.write('z_' + img + '\n') ff.write('mask_' + img + '\n') listmask.append('mask_' + img) os.system('rm -rf mask_' + img) os.system('rm -rf t_' + img) os.system('rm -rf z_' + img) iraf.imsurfit(img, 't_' + img, xorder=_xorder, yorder=_yorder, type_ou='residual', regions='all') iraf.imarith('t_' + img, '/', _exptime, 'z_' + img) os.system('rm -rf t_' + img) gg.close() ff.close() hh.close() os.system('rm ' + _output) iraf.objmasks(images='@_listobgz', objmasks='@_listmask', omtype='boolean', blksize=-16, convolv='block 3 3', hsigma=5, lsigma=3, minpix=10, ngrow=2, agrow=4.) iraf.imcombine('@_listobgz', output=_output, masktyp='!OBJMASK', maskval=0, combine=_combine, reject=_rejection, scale='none', statsec='[100:800,100:800]', offsets='', rdnoise='', gain='', nlow=1, nhigh=1, logfile='imcombinelog') for img in listimg: os.system('rm -rf mask_' + img) os.system('rm -rf z_' + img) os.system('rm _listmask') os.system('rm _listobg') iraf.display(_output, frame=2, fill='yes') ##########################################################################
mit
-4,302,062,766,581,251,000
39.751553
135
0.515623
false
andrewyang96/RacetrackGenerator
utils.py
1
3274
import numpy as np
from numpy.linalg import norm
import math
from itertools import tee, izip, cycle, islice

# Recipes from https://docs.python.org/2/library/itertools.html#recipes
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    a, b = tee(iterable)
    next(b, None)
    return izip(a, b)

def roundrobin(*iterables):
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    # Recipe credited to George Sakkis
    pending = len(iterables)
    nexts = cycle(iter(it).next for it in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            pending -= 1
            nexts = cycle(islice(nexts, pending))

# Vector manipulation functions
def angle(u, v, unit='r'):
    """Returns angle between vectors u and v."""
    if all([elem == 0 for elem in u]) or all([elem == 0 for elem in v]):
        raise ValueError("Cannot pass a zero-vector")
    if len(u) != len(v):
        raise ValueError("u and v must be of the same length")
    cos = np.dot(u,v) / norm(u) / norm(v)
    rad = math.acos(np.clip(cos, -1, 1))
    if unit == 'r':
        return rad
    elif unit == 'd':
        return math.degrees(rad)
    else:
        raise ValueError("{0} is not a valid keyword".format(unit))

def midpoint(p1, p2):
    """Returns midpoint between points p1 and p2."""
    mdpt = np.divide(np.add(p1, p2), 2)
    return tuple(mdpt)

def vector(p1, p2):
    """Returns vector from p1 to p2."""
    vec = np.subtract(p2, p1)
    return tuple(vec)

def distance(p1, p2):
    """Returns distance between points p1 and p2."""
    return norm(vector(p1, p2))

def normalize(v):
    """Returns the unit vector pointing in the direction of v."""
    n = np.divide(v, norm(v))
    return tuple(n)

def rotationMatrix(theta, unit='r'):
    """Returns rotation matrix using angle theta."""
    if unit == 'r':
        pass
    elif unit == 'd':
        theta = math.radians(theta)
    else:
        raise ValueError("{0} is not a valid keyword".format(unit))
    rotmat = np.zeros((2,2))
    rotmat[0][0] = math.cos(theta)
    rotmat[0][1] = -math.sin(theta)
    rotmat[1][0] = math.sin(theta)
    rotmat[1][1] = rotmat[0][0]
    rotmat = np.matrix(rotmat)
    return rotmat

def rotate(v, theta, unit='r'):
    """Returns a rotated vector."""
    if len(v) != 2:
        raise ValueError("Dimension of v must be 2")
    if unit == 'r':
        pass
    elif unit == 'd':
        theta = math.radians(theta)
    else:
        raise ValueError("{0} is not a valid keyword".format(unit))
    rotmat = rotationMatrix(theta)
    v = np.matrix(v).getT()
    rotvec = (rotmat * v).getT().getA().flatten()
    return tuple(rotvec)

# Given two points that represent a leg of a right triangle and an angle,
# find the endpoint of the hypotenuse.
def endpoint(p1, p2, theta, unit='r'):
    if unit == 'r':
        pass
    elif unit == 'd':
        theta = math.radians(theta)
    else:
        raise ValueError("{0} is not a valid keyword".format(unit))
    vec = vector(p1, p2)
    rotvec = rotate(vec, theta)
    veclength = norm(vec) / math.cos(theta)
    rotvec = np.multiply(veclength, normalize(rotvec))
    endpt = np.add(p1, rotvec)
    return tuple(endpt)

# Sorting functions
def getRanks(l):
    """Returns the rank (0 = smallest) of each element of l."""
    order = np.array(l).argsort()
    ranks = order.argsort()
    return ranks
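# A minimal usage sketch of the rotation/endpoint helpers above (illustrative
# only; the input points and angles below are arbitrary example values, and
# only functions defined in this module are used).
if __name__ == '__main__':
    # Rotating the unit x-vector by 90 degrees should give the unit y-vector.
    assert np.allclose(rotate((1.0, 0.0), 90, unit='d'), (0.0, 1.0))
    # With the leg p1=(0,0) -> p2=(1,0) and a 45 degree angle, the hypotenuse
    # of the right triangle ends at (1, 1): |hyp| = |leg| / cos(45 deg).
    assert np.allclose(endpoint((0.0, 0.0), (1.0, 0.0), 45, unit='d'),
                       (1.0, 1.0))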
mit
4,421,793,365,104,066,600
27.973451
73
0.602321
false
asteca/ASteCA
packages/decont_algors/read_da.py
1
1530
import numpy as np

from ..inp import data_IO


def main(cl_region, clust_name, memb_file, readda_idcol=0, readda_mpcol=-2):
    """
    Read MP values from file. Any star in the defined cluster whose ID is not
    found in the membership file will be assigned MP=0.5.

    The indexes for the ID and MPs columns are hardcoded.
    """
    print("Reading membership probabilities from file")

    # Read IDs and MPs from file.
    data = data_IO.dataRead(clust_name, memb_file, 'r')

    # Read IDs as strings since that is how they are stored in 'cl_region'
    id_list = [str(_) for _ in data.columns[readda_idcol]]
    try:
        memb_probs = data.columns[readda_mpcol]
    except IndexError:
        print("  WARNING: MPs column not found. Assigned MP=1. to all stars")
        memb_probs = np.ones(len(data))

    N_not, memb_probs_cl_region = 0, []
    # Assign probabilities read from file according to the star's IDs.
    for star in cl_region:
        if star[0] in id_list:
            # Index of star in file.
            i = id_list.index(star[0])
            # Assign the probability stored in file for this star.
            memb_probs_cl_region.append(memb_probs[i])
        else:
            # Stars not present in the list are assigned a fixed value.
            memb_probs_cl_region.append(0.5)
            N_not += 1

    if N_not > 0:
        print(("  WARNING: {} stars were not present in the membership\n" +
               "  file and were assigned MP=0.5").format(N_not))

    return memb_probs_cl_region
gpl-3.0
-2,454,906,291,747,150,000
36.317073
77
0.622222
false
fluidmotion/visTools
visGIS.py
1
8415
''' GIS conversion/interpolation tools for use in VisIt (http://wci.llnl.gov/codes/visit/). Relies on ogr/gdal. ''' __author__ = 'fluidmotion' import os,sys from osgeo import ogr, gdal import numpy as np import matplotlib.pyplot as plt class shpTools(): ''' accepts line shapefile of contours - interpolates to an ascii grid using scipy. example data from http://www.swfwmd.state.fl.us/data/gis/layer_library/category/potmaps ''' def __init__(self, shp, outfile, fld, cellsize, interptype='rbf', wkt=''): self.shp = shp self.fld = fld self.outfile = outfile self.cellsize = cellsize self.nrow = 0 self.ncol = 0 self.x = 0 self.y = 0 self.z = [] self.ext = 0 self.interptype = interptype self.wkt = wkt self.minval = -1e30 def getVerts(self): driver = ogr.GetDriverByName('ESRI Shapefile') ds = driver.Open(self.shp) lyr = ds.GetLayer(0) self.ext = lyr.GetExtent() self.nrow = int((self.ext[3]-self.ext[2])/self.cellsize) self.ncol = int((self.ext[1]-self.ext[0])/self.cellsize) x = [] y = [] print 'getting vertices...' for i in range(len(lyr)): pts = lyr.GetNextFeature() geom = pts.GetGeometryRef().Clone() v = geom.GetPoints() for verts in v: x.append(verts[0]) y.append(verts[1]) self.z.append(pts.GetFieldAsDouble(self.fld)) self.x = np.array(x) self.y = np.array(y) print(self.nrow, self.ncol) def grid2dem(self): hdr = 'ncols {}\n'.format(self.ncol) hdr = hdr+'nrows {}\n'.format(self.nrow) hdr = hdr+'xllcorner {}\n'.format(self.ext[0]) hdr = hdr+'yllcorner {}\n'.format(self.ext[2]) hdr = hdr+'cellsize {}\n'.format(self.cellsize) hdr = hdr+'NODATA_value -999' np.savetxt(self.outfile,np.flipud(self.grid.T), header=hdr, comments='') def checkdups(self, source): # http://stackoverflow.com/questions/5419204/index-of-duplicates-items-in-a-python-list from collections import defaultdict def list_duplicates(seq): tally = defaultdict(list) for i,item in enumerate(seq): tally[item].append(i) # return ((key,locs) for key,locs in tally.items() # if len(locs)>1) return (locs for key,locs in tally.items() if len(locs)>1) # for dup in sorted(list_duplicates(source)): return sorted(list_duplicates(source)) # print dup def interpGrid(self): ptx = np.array(self.x) pty = np.array(self.y) z = np.array(self.z) print(len(ptx), 'length x') # remove duplicate x values dups = self.checkdups(self.x) ptx = np.delete(ptx, dups) pty = np.delete(pty, dups) z = np.delete(z, dups) print(len(ptx), 'length x') pts = zip(self.x, self.y) # gridx, gridy = np.mgrid[uprLeft[0]:lwrRight[0]:50j,uprLeft[1]:lwrRight[1]:50j] gridx, gridy = np.mgrid[self.ext[0]:self.ext[1]:self.ncol*1j, self.ext[2]:self.ext[3]:self.nrow*1j] ##### using griddata ##### if self.interptype == 'griddata': from scipy.interpolate import griddata self.grid = griddata(pts,self.z,(gridx,gridy), method='cubic',fill_value=-3e30) #### examples from ##### http://stackoverflow.com/questions/24978052/interpolation-over-regular-grid-in-python ##### using radial basis function #### if self.interptype == 'rbf': import scipy.interpolate as interpolate f = interpolate.Rbf(pty, ptx, z, function='linear') self.grid = f(gridy, gridx) ##### using kriging #### if self.interptype == 'gauss': from sklearn.gaussian_process import GaussianProcess # print math.sqrt(np.var(z)) # gp = GaussianProcess(theta0=0.1, thetaL=1.1, thetaU=10.1, nugget=0.000001) if np.min(z) <= 0: thetaL = 0.1 else: thetaL = np.min(z) print(np.min(z), thetaL, np.max(z)) # gp = GaussianProcess(regr='quadratic',corr='cubic',theta0=np.min(z),thetaL=thetaL,thetaU=np.max(z),nugget=0.05) gp = 
GaussianProcess(theta0=500,thetaL=100,thetaU=2000) gp.fit(X=np.column_stack([pty,ptx]),y=z) rr_cc_as_cols = np.column_stack([gridy.flatten(), gridx.flatten()]) self.grid = gp.predict(rr_cc_as_cols).reshape((self.ncol,self.nrow)) if self.interptype == 'krig': import pyKriging from pyKriging.krige import kriging from pyKriging.samplingplan import samplingplan # The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here # sp = samplingplan(2) # X = sp.optimallhc(20) # print(X) X = np.array(zip(self.x, self.y)) print(X.shape) # Next, we define the problem we would like to solve testfun = pyKriging.testfunctions().squared # y = testfun(X) # print(y) y = self.z # Now that we have our initial data, we can create an instance of a Kriging model k = kriging(X, y)#, testfunction=testfun, name='simple') # k.train() # Now, five infill points are added. Note that the model is re-trained after each point is added # numiter = 5 # for i in range(numiter): # print 'Infill iteration {0} of {1}....'.format(i + 1, numiter) # newpoints = k.infill(1) # for point in newpoints: # k.addPoint(point, testfun(point)[0]) # k.train() # And plot the results k.plot() sys.exit() self.grid[self.grid < self.minval] = -2.99999989403e+030 #self.minval self.grid = np.flipud(self.grid.T) @staticmethod def getExtent(gt,cols,rows): ''' Return list of corner coordinates from a geotransform @type gt: C{tuple/list} @param gt: geotransform @type cols: C{int} @param cols: number of columns in the dataset @type rows: C{int} @param rows: number of rows in the dataset @rtype: C{[float,...,float]} @return: coordinates of each corner ''' ext=[] xarr=[0,cols] yarr=[0,rows] # for px in xarr: # for py in yarr: minx = gt[0] + (0 * gt[1]) + (0 * gt[2]) miny = gt[3] + (0 * gt[4]) + (rows * gt[5]) maxx = gt[0] + (cols * gt[1]) + (rows * gt[2]) maxy = gt[3] + (cols * gt[4]) + (0 * gt[5]) ext = [minx, maxx, miny, maxy] print minx,maxx, miny, maxy # yarr.reverse() return ext def writeRaster(self): print 'writing tiff...' xres = (self.ext[1] - self.ext[0]) / float(self.ncol) yres = (self.ext[3] - self.ext[2]) / float(self.nrow) geotransform = (self.ext[0], xres, 0, self.ext[3], 0, -yres) drv = gdal.GetDriverByName('GTiff') ds = drv.Create(self.outfile, self.ncol, self.nrow, 1 ,gdal.GDT_Float32) # Open the file band = ds.GetRasterBand(1) band.SetNoDataValue(-3e30) ds.SetGeoTransform(geotransform) # Specify its coordinates if self.wkt != '': ds.SetProjection(self.wkt) # Exports the coordinate system band.WriteArray(self.grid) # Writes my array to the raster # band.WriteArray(np.flipud(self.grid.T)) # Writes my array to the raster #http://gis.stackexchange.com/questions/37238/writing-numpy-array-to-raster-file # print grid.shape
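# A minimal standalone sketch of the scattered-to-grid interpolation done by
# the 'rbf' branch of shpTools.interpGrid() above (illustrative only; the
# scattered sample points, values and grid size below are made up and are not
# taken from any shapefile).
if __name__ == '__main__':
    import scipy.interpolate as interpolate
    xs = np.random.uniform(0.0, 10.0, 50)            # scattered x coordinates
    ys = np.random.uniform(0.0, 10.0, 50)            # scattered y coordinates
    zs = np.sin(xs) + np.cos(ys)                     # sample values at the points
    f = interpolate.Rbf(ys, xs, zs, function='linear')  # same argument order as above
    gridx, gridy = np.mgrid[0.0:10.0:40j, 0.0:10.0:40j]
    surface = f(gridy, gridx)                        # values on the regular grid
    assert surface.shape == (40, 40)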
mit
-7,491,133,490,942,863,000
37.139535
125
0.525253
false
heli522/scikit-learn
sklearn/utils/estimator_checks.py
33
48331
from __future__ import print_function import types import warnings import sys import traceback import inspect import pickle from copy import deepcopy import numpy as np from scipy import sparse import struct from sklearn.externals.six.moves import zip from sklearn.externals.joblib import hash, Memory from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import META_ESTIMATORS from sklearn.utils.testing import set_random_state from sklearn.utils.testing import assert_greater from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings from sklearn.base import (clone, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, BaseEstimator) from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score from sklearn.lda import LDA from sklearn.random_projection import BaseRandomProjection from sklearn.feature_selection import SelectKBest from sklearn.svm.base import BaseLibSVM from sklearn.pipeline import make_pipeline from sklearn.utils.validation import DataConversionWarning from sklearn.cross_validation import train_test_split from sklearn.utils import shuffle from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris, load_boston, make_blobs BOSTON = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso', 'LassoLars', 'LinearRegression', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression', 'RANSACRegressor', 'RadiusNeighborsRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV'] def _yield_non_meta_checks(name, Estimator): yield check_estimators_dtypes yield check_fit_score_takes_y yield check_dtype_object yield check_estimators_fit_returns_self # Check that all estimator yield informative messages when # trained on empty datasets yield check_estimators_empty_data_messages if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']: # SpectralEmbedding is non-deterministic, # see issue #4236 # cross-decomposition's "transform" returns X and Y yield check_pipeline_consistency if name not in ['Imputer']: # Test that all estimators check their input for NaN's and infs yield check_estimators_nan_inf if name not in ['GaussianProcess']: # FIXME! # in particular GaussianProcess! yield check_estimators_overwrite_params if hasattr(Estimator, 'sparsify'): yield check_sparsify_coefficients yield check_estimator_sparse_data # Test that estimators can be pickled, and once pickled # give the same answer as before. 
yield check_estimators_pickle def _yield_classifier_checks(name, Classifier): # test classfiers can handle non-array data yield check_classifier_data_not_an_array # test classifiers trained on a single label always return this label yield check_classifiers_one_label yield check_classifiers_classes yield check_estimators_partial_fit_n_features # basic consistency testing yield check_classifiers_train if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"] # TODO some complication with -1 label and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]): # We don't raise a warning in these classifiers, as # the column y interface is used by the forests. yield check_supervised_y_2d # test if NotFittedError is raised yield check_estimators_unfitted if 'class_weight' in Classifier().get_params().keys(): yield check_class_weight_classifiers def _yield_regressor_checks(name, Regressor): # TODO: test with intercept # TODO: test with multiple responses # basic testing yield check_regressors_train yield check_regressor_data_not_an_array yield check_estimators_partial_fit_n_features yield check_regressors_no_decision_function yield check_supervised_y_2d if name != 'CCA': # check that the regressor handles int input yield check_regressors_int # Test if NotFittedError is raised yield check_estimators_unfitted def _yield_transformer_checks(name, Transformer): # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer', 'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']: yield check_transformer_data_not_an_array # these don't actually fit the data, so don't raise errors if name not in ['AdditiveChi2Sampler', 'Binarizer', 'FunctionTransformer', 'Normalizer']: # basic tests yield check_transformer_general yield check_transformers_unfitted def _yield_clustering_checks(name, Clusterer): yield check_clusterer_compute_labels_predict if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. yield check_clustering yield check_estimators_partial_fit_n_features def _yield_all_checks(name, Estimator): for check in _yield_non_meta_checks(name, Estimator): yield check if issubclass(Estimator, ClassifierMixin): for check in _yield_classifier_checks(name, Estimator): yield check if issubclass(Estimator, RegressorMixin): for check in _yield_regressor_checks(name, Estimator): yield check if issubclass(Estimator, TransformerMixin): for check in _yield_transformer_checks(name, Estimator): yield check if issubclass(Estimator, ClusterMixin): for check in _yield_clustering_checks(name, Estimator): yield check def check_estimator(Estimator): """Check if estimator adheres to sklearn conventions. This estimator will run an extensive test-suite for input validation, shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin from sklearn.base. Parameters ---------- Estimator : class Class to check. 
""" name = Estimator.__class__.__name__ check_parameters_default_constructible(name, Estimator) for check in _yield_all_checks(name, Estimator): check(name, Estimator) def _boston_subset(n_samples=200): global BOSTON if BOSTON is None: boston = load_boston() X, y = boston.data, boston.target X, y = shuffle(X, y, random_state=0) X, y = X[:n_samples], y[:n_samples] X = StandardScaler().fit_transform(X) BOSTON = X, y return BOSTON def set_fast_parameters(estimator): # speed up some estimators params = estimator.get_params() if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"): estimator.set_params(n_iter=5) if "max_iter" in params: # NMF if estimator.max_iter is not None: estimator.set_params(max_iter=min(5, estimator.max_iter)) # LinearSVR if estimator.__class__.__name__ == 'LinearSVR': estimator.set_params(max_iter=20) if "n_resampling" in params: # randomized lasso estimator.set_params(n_resampling=5) if "n_estimators" in params: # especially gradient boosting with default 100 estimator.set_params(n_estimators=min(5, estimator.n_estimators)) if "max_trials" in params: # RANSAC estimator.set_params(max_trials=10) if "n_init" in params: # K-Means estimator.set_params(n_init=2) if estimator.__class__.__name__ == "SelectFdr": # be tolerant of noisy datasets (not actually speed) estimator.set_params(alpha=.5) if estimator.__class__.__name__ == "TheilSenRegressor": estimator.max_subpopulation = 100 if isinstance(estimator, BaseRandomProjection): # Due to the jl lemma and often very few samples, the number # of components of the random matrix projection will be probably # greater than the number of features. # So we impose a smaller number (avoid "auto" mode) estimator.set_params(n_components=1) if isinstance(estimator, SelectKBest): # SelectKBest has a default of k=10 # which is more feature than we have in most case. estimator.set_params(k=1) class NotAnArray(object): " An object that is convertable to an array" def __init__(self, data): self.data = data def __array__(self, dtype=None): return self.data def _is_32bit(): """Detect if process is 32bit Python.""" return struct.calcsize('P') * 8 == 32 def check_estimator_sparse_data(name, Estimator): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 X_csr = sparse.csr_matrix(X) y = (4 * rng.rand(40)).astype(np.int) for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']: X = X_csr.asformat(sparse_format) # catch deprecation warnings with warnings.catch_warnings(): if name in ['Scaler', 'StandardScaler']: estimator = Estimator(with_mean=False) else: estimator = Estimator() set_fast_parameters(estimator) # fit and predict try: estimator.fit(X, y) if hasattr(estimator, "predict"): pred = estimator.predict(X) assert_equal(pred.shape, (X.shape[0],)) if hasattr(estimator, 'predict_proba'): probs = estimator.predict_proba(X) assert_equal(probs.shape, (X.shape[0], 4)) except TypeError as e: if 'sparse' not in repr(e): print("Estimator %s doesn't seem to fail gracefully on " "sparse data: error message state explicitly that " "sparse input is not supported if this is not the case." % name) raise except Exception: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: it should raise a TypeError if sparse input " "is explicitly not supported." 
% name) raise def check_dtype_object(name, Estimator): # check that estimators treat dtype object as numeric if possible rng = np.random.RandomState(0) X = rng.rand(40, 10).astype(object) y = (X[:, 0] * 4).astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) with warnings.catch_warnings(): estimator = Estimator() set_fast_parameters(estimator) estimator.fit(X, y) if hasattr(estimator, "predict"): estimator.predict(X) if hasattr(estimator, "transform"): estimator.transform(X) try: estimator.fit(X, y.astype(object)) except Exception as e: if "Unknown label type" not in str(e): raise X[0, 0] = {'foo': 'bar'} msg = "argument must be a string or a number" assert_raises_regex(TypeError, msg, estimator.fit, X, y) def check_transformer_general(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) X -= X.min() _check_transformer(name, Transformer, X, y) _check_transformer(name, Transformer, X.tolist(), y.tolist()) def check_transformer_data_not_an_array(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 this_X = NotAnArray(X) this_y = NotAnArray(np.asarray(y)) _check_transformer(name, Transformer, this_X, this_y) def check_transformers_unfitted(name, Transformer): X, y = _boston_subset() with warnings.catch_warnings(record=True): transformer = Transformer() assert_raises((AttributeError, ValueError), transformer.transform, X) def _check_transformer(name, Transformer, X, y): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # on numpy & scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) n_samples, n_features = np.asarray(X).shape # catch deprecation warnings with warnings.catch_warnings(record=True): transformer = Transformer() set_random_state(transformer) set_fast_parameters(transformer) # fit if name in CROSS_DECOMPOSITION: y_ = np.c_[y, y] y_[::2, 1] *= 2 else: y_ = y transformer.fit(X, y_) X_pred = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple): for x_pred in X_pred: assert_equal(x_pred.shape[0], n_samples) else: # check for consistent n_samples assert_equal(X_pred.shape[0], n_samples) if hasattr(transformer, 'transform'): if name in CROSS_DECOMPOSITION: X_pred2 = transformer.transform(X, y_) X_pred3 = transformer.fit_transform(X, y=y_) else: X_pred2 = transformer.transform(X) X_pred3 = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): assert_array_almost_equal( x_pred, x_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( x_pred, x_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) else: assert_array_almost_equal( X_pred, X_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( X_pred, X_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) assert_equal(len(X_pred2), n_samples) assert_equal(len(X_pred3), n_samples) # raises error on malformed input for transform if hasattr(X, 'T'): # If it's not an array, it does not have a 'T' property assert_raises(ValueError, transformer.transform, X.T) @ignore_warnings def check_pipeline_consistency(name, Estimator): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) # check that make_pipeline(est) gives same score as est X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) pipeline = make_pipeline(estimator) estimator.fit(X, y) pipeline.fit(X, y) funcs = ["score", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func_pipeline = getattr(pipeline, func_name) result = func(X, y) result_pipe = func_pipeline(X, y) assert_array_almost_equal(result, result_pipe) @ignore_warnings def check_fit_score_takes_y(name, Estimator): # check that all estimators accept an optional y # in fit and score so they can be used in pipelines rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = inspect.getargspec(func).args assert_true(args[2] in ["y", "Y"]) @ignore_warnings def check_estimators_dtypes(name, Estimator): rnd = np.random.RandomState(0) X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) X_train_64 = X_train_32.astype(np.float64) X_train_int_64 = X_train_32.astype(np.int64) X_train_int_32 = X_train_32.astype(np.int32) y = X_train_int_64[:, 0] y = multioutput_estimator_convert_y_2d(name, y) for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator, 1) estimator.fit(X_train, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): getattr(estimator, method)(X_train) def check_estimators_empty_data_messages(name, Estimator): e = Estimator() set_fast_parameters(e) set_random_state(e, 1) X_zero_samples = np.empty(0).reshape(0, 3) # The precise message can change depending on whether X or y is # validated first. Let us test the type of exception only: assert_raises(ValueError, e.fit, X_zero_samples, []) X_zero_features = np.empty(0).reshape(3, 0) # the following y should be accepted by both classifiers and regressors # and ignored by unsupervised models y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1])) msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required." assert_raise_message(ValueError, msg, e.fit, X_zero_features, y) def check_estimators_nan_inf(name, Estimator): rnd = np.random.RandomState(0) X_train_finite = rnd.uniform(size=(10, 3)) X_train_nan = rnd.uniform(size=(10, 3)) X_train_nan[0, 0] = np.nan X_train_inf = rnd.uniform(size=(10, 3)) X_train_inf[0, 0] = np.inf y = np.ones(10) y[:5] = 0 y = multioutput_estimator_convert_y_2d(name, y) error_string_fit = "Estimator doesn't check for NaN and inf in fit." 
error_string_predict = ("Estimator doesn't check for NaN and inf in" " predict.") error_string_transform = ("Estimator doesn't check for NaN and inf in" " transform.") for X_train in [X_train_nan, X_train_inf]: # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator, 1) # try to fit try: estimator.fit(X_train, y) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_fit, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_fit, Estimator, exc) traceback.print_exc(file=sys.stdout) raise exc else: raise AssertionError(error_string_fit, Estimator) # actually fit estimator.fit(X_train_finite, y) # predict if hasattr(estimator, "predict"): try: estimator.predict(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_predict, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_predict, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_predict, Estimator) # transform if hasattr(estimator, "transform"): try: estimator.transform(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_transform, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_transform, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_transform, Estimator) def check_estimators_pickle(name, Estimator): """Test that we can pickle all estimators""" check_methods = ["predict", "transform", "decision_function", "predict_proba"] X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) # some estimators can't do features less than 0 X -= X.min() # some estimators only take multioutputs y = multioutput_estimator_convert_y_2d(name, y) # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_random_state(estimator) set_fast_parameters(estimator) estimator.fit(X, y) result = dict() for method in check_methods: if hasattr(estimator, method): result[method] = getattr(estimator, method)(X) # pickle and unpickle! pickled_estimator = pickle.dumps(estimator) unpickled_estimator = pickle.loads(pickled_estimator) for method in result: unpickled_result = getattr(unpickled_estimator, method)(X) assert_array_almost_equal(result[method], unpickled_result) def check_estimators_partial_fit_n_features(name, Alg): # check if number of features changes between calls to partial_fit. 
if not hasattr(Alg, 'partial_fit'): return X, y = make_blobs(n_samples=50, random_state=1) X -= X.min() with warnings.catch_warnings(record=True): alg = Alg() set_fast_parameters(alg) if isinstance(alg, ClassifierMixin): classes = np.unique(y) alg.partial_fit(X, y, classes=classes) else: alg.partial_fit(X, y) assert_raises(ValueError, alg.partial_fit, X[:, :-1], y) def check_clustering(name, Alg): X, y = make_blobs(n_samples=50, random_state=1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) n_samples, n_features = X.shape # catch deprecation and neighbors warnings with warnings.catch_warnings(record=True): alg = Alg() set_fast_parameters(alg) if hasattr(alg, "n_clusters"): alg.set_params(n_clusters=3) set_random_state(alg) if name == 'AffinityPropagation': alg.set_params(preference=-100) alg.set_params(max_iter=100) # fit alg.fit(X) # with lists alg.fit(X.tolist()) assert_equal(alg.labels_.shape, (n_samples,)) pred = alg.labels_ assert_greater(adjusted_rand_score(pred, y), 0.4) # fit another time with ``fit_predict`` and compare results if name is 'SpectralClustering': # there is no way to make Spectral clustering deterministic :( return set_random_state(alg) with warnings.catch_warnings(record=True): pred2 = alg.fit_predict(X) assert_array_equal(pred, pred2) def check_clusterer_compute_labels_predict(name, Clusterer): """Check that predict is invariant of compute_labels""" X, y = make_blobs(n_samples=20, random_state=0) clusterer = Clusterer() if hasattr(clusterer, "compute_labels"): # MiniBatchKMeans if hasattr(clusterer, "random_state"): clusterer.set_params(random_state=0) X_pred1 = clusterer.fit(X).predict(X) clusterer.set_params(compute_labels=False) X_pred2 = clusterer.fit(X).predict(X) assert_array_equal(X_pred1, X_pred2) def check_classifiers_one_label(name, Classifier): error_string_fit = "Classifier can't train when only one class is present." 
error_string_predict = ("Classifier can't predict when only one class is " "present.") rnd = np.random.RandomState(0) X_train = rnd.uniform(size=(10, 3)) X_test = rnd.uniform(size=(10, 3)) y = np.ones(10) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() set_fast_parameters(classifier) # try to fit try: classifier.fit(X_train, y) except ValueError as e: if 'class' not in repr(e): print(error_string_fit, Classifier, e) traceback.print_exc(file=sys.stdout) raise e else: return except Exception as exc: print(error_string_fit, Classifier, exc) traceback.print_exc(file=sys.stdout) raise exc # predict try: assert_array_equal(classifier.predict(X_test), y) except Exception as exc: print(error_string_predict, Classifier, exc) raise exc def check_classifiers_train(name, Classifier): X_m, y_m = make_blobs(n_samples=300, random_state=0) X_m, y_m = shuffle(X_m, y_m, random_state=7) X_m = StandardScaler().fit_transform(X_m) # generate binary problem from multi-class one y_b = y_m[y_m != 2] X_b = X_m[y_m != 2] for (X, y) in [(X_m, y_m), (X_b, y_b)]: # catch deprecation warnings classes = np.unique(y) n_classes = len(classes) n_samples, n_features = X.shape with warnings.catch_warnings(record=True): classifier = Classifier() if name in ['BernoulliNB', 'MultinomialNB']: X -= X.min() set_fast_parameters(classifier) set_random_state(classifier) # raises error on malformed input for fit assert_raises(ValueError, classifier.fit, X, y[:-1]) # fit classifier.fit(X, y) # with lists classifier.fit(X.tolist(), y.tolist()) assert_true(hasattr(classifier, "classes_")) y_pred = classifier.predict(X) assert_equal(y_pred.shape, (n_samples,)) # training set performance if name not in ['BernoulliNB', 'MultinomialNB']: assert_greater(accuracy_score(y, y_pred), 0.83) # raises error on malformed input for predict assert_raises(ValueError, classifier.predict, X.T) if hasattr(classifier, "decision_function"): try: # decision_function agrees with predict decision = classifier.decision_function(X) if n_classes is 2: assert_equal(decision.shape, (n_samples,)) dec_pred = (decision.ravel() > 0).astype(np.int) assert_array_equal(dec_pred, y_pred) if (n_classes is 3 and not isinstance(classifier, BaseLibSVM)): # 1on1 of LibSVM works differently assert_equal(decision.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(decision, axis=1), y_pred) # raises error on malformed input assert_raises(ValueError, classifier.decision_function, X.T) # raises error on malformed input for decision_function assert_raises(ValueError, classifier.decision_function, X.T) except NotImplementedError: pass if hasattr(classifier, "predict_proba"): # predict_proba agrees with predict y_prob = classifier.predict_proba(X) assert_equal(y_prob.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(y_prob, axis=1), y_pred) # check that probas for all classes sum to one assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples)) # raises error on malformed input assert_raises(ValueError, classifier.predict_proba, X.T) # raises error on malformed input for predict_proba assert_raises(ValueError, classifier.predict_proba, X.T) def check_estimators_fit_returns_self(name, Estimator): """Check if self is returned when calling fit""" X, y = make_blobs(random_state=0, n_samples=9, n_features=4) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) assert_true(estimator.fit(X, y) is 
estimator) @ignore_warnings def check_estimators_unfitted(name, Estimator): """Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise either AttributeError or ValueError. The specific exception type NotFittedError inherits from both and can therefore be adequately raised for that purpose. """ # Common test for Regressors as well as Classifiers X, y = _boston_subset() with warnings.catch_warnings(record=True): est = Estimator() msg = "fit" if hasattr(est, 'predict'): assert_raise_message((AttributeError, ValueError), msg, est.predict, X) if hasattr(est, 'decision_function'): assert_raise_message((AttributeError, ValueError), msg, est.decision_function, X) if hasattr(est, 'predict_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_proba, X) if hasattr(est, 'predict_log_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_log_proba, X) def check_supervised_y_2d(name, Estimator): if "MultiTask" in name: # These only work on 2d, so this test makes no sense return rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) # fit estimator.fit(X, y) y_pred = estimator.predict(X) set_random_state(estimator) # Check that when a 2D y is given, a DataConversionWarning is # raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", DataConversionWarning) warnings.simplefilter("ignore", RuntimeWarning) estimator.fit(X, y[:, np.newaxis]) y_pred_2d = estimator.predict(X) msg = "expected 1 DataConversionWarning, got: %s" % ( ", ".join([str(w_x) for w_x in w])) if name not in MULTI_OUTPUT: # check that we warned if we don't support multi-output assert_greater(len(w), 0, msg) assert_true("DataConversionWarning('A column-vector y" " was passed when a 1d array was expected" in msg) assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel()) def check_classifiers_classes(name, Classifier): X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 y_names = np.array(["one", "two", "three"])[y] for y_names in [y_names, y_names.astype('O')]: if name in ["LabelPropagation", "LabelSpreading"]: # TODO some complication with -1 label y_ = y else: y_ = y_names classes = np.unique(y_) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() if name == 'BernoulliNB': classifier.set_params(binarize=X.mean()) set_fast_parameters(classifier) set_random_state(classifier) # fit classifier.fit(X, y_) y_pred = classifier.predict(X) # training set performance assert_array_equal(np.unique(y_), np.unique(y_pred)) if np.any(classifier.classes_ != classes): print("Unexpected classes_ attribute for %r: " "expected %s, got %s" % (classifier, classes, classifier.classes_)) def check_regressors_int(name, Regressor): X, _ = _boston_subset() X = X[:50] rnd = np.random.RandomState(0) y = rnd.randint(3, size=X.shape[0]) y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds regressor_1 = Regressor() regressor_2 = Regressor() set_fast_parameters(regressor_1) set_fast_parameters(regressor_2) 
set_random_state(regressor_1) set_random_state(regressor_2) if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y # fit regressor_1.fit(X, y_) pred1 = regressor_1.predict(X) regressor_2.fit(X, y_.astype(np.float)) pred2 = regressor_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_regressors_train(name, Regressor): X, y = _boston_subset() y = StandardScaler().fit_transform(y) # X is already scaled y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): regressor = Regressor() set_fast_parameters(regressor) if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == 'PassiveAggressiveRegressor': regressor.C = 0.01 # raises error on malformed input for fit assert_raises(ValueError, regressor.fit, X, y[:-1]) # fit if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y set_random_state(regressor) regressor.fit(X, y_) regressor.fit(X.tolist(), y_.tolist()) y_pred = regressor.predict(X) assert_equal(y_pred.shape, y_.shape) # TODO: find out why PLS and CCA fail. RANSAC is random # and furthermore assumes the presence of outliers, hence # skipped if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'): print(regressor) assert_greater(regressor.score(X, y_), 0.5) @ignore_warnings def check_regressors_no_decision_function(name, Regressor): # checks whether regressors have decision_function or predict_proba rng = np.random.RandomState(0) X = rng.normal(size=(10, 4)) y = multioutput_estimator_convert_y_2d(name, X[:, 0]) regressor = Regressor() set_fast_parameters(regressor) if hasattr(regressor, "n_components"): # FIXME CCA, PLS is not robust to rank 1 effects regressor.n_components = 1 regressor.fit(X, y) funcs = ["decision_function", "predict_proba", "predict_log_proba"] for func_name in funcs: func = getattr(regressor, func_name, None) if func is None: # doesn't have function continue # has function. Should raise deprecation warning msg = func_name assert_warns_message(DeprecationWarning, msg, func, X) def check_class_weight_classifiers(name, Classifier): if name == "NuSVC": # the sparse version has a parameter that doesn't do anything raise SkipTest if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! 
raise SkipTest for n_centers in [2, 3]: # create a very noisy dataset X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) n_centers = len(np.unique(y_train)) if n_centers == 2: class_weight = {0: 1000, 1: 0.0001} else: class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} with warnings.catch_warnings(record=True): classifier = Classifier(class_weight=class_weight) if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) if hasattr(classifier, "min_weight_fraction_leaf"): classifier.set_params(min_weight_fraction_leaf=0.01) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) assert_greater(np.mean(y_pred == 0), 0.89) def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train, X_test, y_test, weights): with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) classifier.set_params(class_weight='balanced') classifier.fit(X_train, y_train) y_pred_balanced = classifier.predict(X_test) assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'), f1_score(y_test, y_pred, average='weighted')) def check_class_weight_balanced_linear_classifier(name, Classifier): """Test class weights with non-contiguous class labels.""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = np.array([1, 1, 1, -1, -1]) with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): # This is a very small dataset, default n_iter are likely to prevent # convergence classifier.set_params(n_iter=1000) set_random_state(classifier) # Let the model compute the class frequencies classifier.set_params(class_weight='balanced') coef_balanced = classifier.fit(X, y).coef_.copy() # Count each label occurrence to reweight manually n_samples = len(y) n_classes = float(len(np.unique(y))) class_weight = {1: n_samples / (np.sum(y == 1) * n_classes), -1: n_samples / (np.sum(y == -1) * n_classes)} classifier.set_params(class_weight=class_weight) coef_manual = classifier.fit(X, y).coef_.copy() assert_array_almost_equal(coef_balanced, coef_manual) def check_estimators_overwrite_params(name, Estimator): X, y = make_blobs(random_state=0, n_samples=9) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() with warnings.catch_warnings(record=True): # catch deprecation warnings estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) # Make a physical copy of the orginal estimator parameters before fitting. params = estimator.get_params() original_params = deepcopy(params) # Fit the model estimator.fit(X, y) # Compare the state of the model parameters with the original parameters new_params = estimator.get_params() for param_name, original_value in original_params.items(): new_value = new_params[param_name] # We should never change or mutate the internal state of input # parameters by default. To check this we use the joblib.hash function # that introspects recursively any subobjects to compute a checksum. # The only exception to this rule of immutable constructor parameters # is possible RandomState instance but in this check we explicitly # fixed the random_state params recursively to be integer seeds. 
assert_equal(hash(new_value), hash(original_value), "Estimator %s should not change or mutate " " the parameter %s from %s to %s during fit." % (name, param_name, original_value, new_value)) def check_sparsify_coefficients(name, Estimator): X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]]) y = [1, 1, 1, 2, 2, 2, 3, 3, 3] est = Estimator() est.fit(X, y) pred_orig = est.predict(X) # test sparsify with dense inputs est.sparsify() assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) # pickle and unpickle with sparse coef_ est = pickle.loads(pickle.dumps(est)) assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) def check_classifier_data_not_an_array(name, Estimator): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]]) y = [1, 1, 1, 2, 2, 2] y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_regressor_data_not_an_array(name, Estimator): X, y = _boston_subset(n_samples=50) y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_estimators_data_not_an_array(name, Estimator, X, y): if name in CROSS_DECOMPOSITION: raise SkipTest # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds estimator_1 = Estimator() estimator_2 = Estimator() set_fast_parameters(estimator_1) set_fast_parameters(estimator_2) set_random_state(estimator_1) set_random_state(estimator_2) y_ = NotAnArray(np.asarray(y)) X_ = NotAnArray(np.asarray(X)) # fit estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_parameters_default_constructible(name, Estimator): classifier = LDA() # test default-constructibility # get rid of deprecation warnings with warnings.catch_warnings(record=True): if name in META_ESTIMATORS: estimator = Estimator(classifier) else: estimator = Estimator() # test cloning clone(estimator) # test __repr__ repr(estimator) # test that set_params returns self assert_true(estimator.set_params() is estimator) # test if init does nothing but set parameters # this is important for grid_search etc. # We get the default parameters from init and then # compare these against the actual values of the attributes. # this comes from getattr. Gets rid of deprecation decorator. init = getattr(estimator.__init__, 'deprecated_original', estimator.__init__) try: args, varargs, kws, defaults = inspect.getargspec(init) except TypeError: # init is not a python function. # true for mixins return params = estimator.get_params() if name in META_ESTIMATORS: # they need a non-default argument args = args[2:] else: args = args[1:] if args: # non-empty list assert_equal(len(args), len(defaults)) else: return for arg, default in zip(args, defaults): assert_in(type(default), [str, int, float, bool, tuple, type(None), np.float64, types.FunctionType, Memory]) if arg not in params.keys(): # deprecated parameter, not in get_params assert_true(default is None) continue if isinstance(params[arg], np.ndarray): assert_array_equal(params[arg], default) else: assert_equal(params[arg], default) def multioutput_estimator_convert_y_2d(name, y): # Estimators in mono_output_task_error raise ValueError if y is of 1-D # Convert into a 2-D y for those estimators. 
    if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                 'MultiTaskLasso', 'MultiTaskElasticNet']):
        return y[:, np.newaxis]
    return y


def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    # Check that all iterative solvers run for more than one iteration
    iris = load_iris()
    X, y_ = iris.data, iris.target

    if multi_output:
        y_ = y_[:, np.newaxis]

    set_random_state(estimator, 0)
    if name == 'AffinityPropagation':
        estimator.fit(X)
    else:
        estimator.fit(X, y_)
    assert_greater(estimator.n_iter_, 0)


def check_transformer_n_iter(name, estimator):
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)

    # These return a n_iter per component.
    if name in CROSS_DECOMPOSITION:
        for iter_ in estimator.n_iter_:
            assert_greater(iter_, 1)
    else:
        assert_greater(estimator.n_iter_, 1)


def check_get_params_invariance(name, estimator):
    class T(BaseEstimator):
        """Mock classifier"""

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        return
    else:
        e = estimator()

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
bsd-3-clause
-1,601,981,510,062,638,000
35.669954
79
0.617575
false
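Each of the checks above takes an estimator name and class, so they can be driven one at a time or through scikit-learn's public entry point. A rough sketch follows; the import locations and check signatures shift between releases, so treat the paths below as assumptions rather than the exact API of the version shown above.

from sklearn.linear_model import Ridge
from sklearn.utils.estimator_checks import check_estimator  # assumed import path

# Public entry point: runs the battery of common checks (including ones like
# those defined above) against a single estimator class.
check_estimator(Ridge)

# Individual checks follow the (name, Estimator) convention used above, e.g.:
# check_regressors_train('Ridge', Ridge)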
tcstewar/2015-Embodied_Benchmarks
code/control_ev3.py
1
1729
import time

import nengo
import numpy as np

import ev3link
import embodied_benchmarks as bench

link = ev3link.EV3Link('10.42.0.3')
path0 = '/sys/class/tacho-motor/motor0/'
link.write(path0 + 'command', 'run-direct')
link.write(path0 + 'position', '0')
print 'current position: ', link.read(path0 + 'position')

start_time = time.time()

model = nengo.Network()
with model:
    def ev3_system(t, x):
        value = int(100 * x[0])
        if value > 100:
            value = 100
        if value < -100:
            value = -100
        value = '%d' % value
        link.write(path0 + 'duty_cycle_sp', value)
        p = link.read(path0 + 'position')
        try:
            return float(p) / 180 * np.pi
        except:
            return 0
    ev3 = nengo.Node(ev3_system, size_in=1, size_out=1)

    pid = bench.pid.PID(2, 1, 0, tau_d=0.001)
    control = nengo.Node(lambda t, x: pid.step(x[:1], x[1:]), size_in=2)

    nengo.Connection(ev3, control[:1], synapse=0)
    nengo.Connection(control, ev3, synapse=None)

    actual_time = nengo.Node(lambda t: time.time() - start_time)

    def desired_func(t, actual_t):
        return np.sin(actual_t * 2 * np.pi)
    desired = nengo.Node(desired_func, size_in=1)
    nengo.Connection(actual_time, desired, synapse=None)
    nengo.Connection(desired, control[1:], synapse=None)

    p_desired = nengo.Probe(desired, synapse=None)
    p_q = nengo.Probe(ev3, synapse=None)
    p_t = nengo.Probe(actual_time, synapse=None)

sim = nengo.Simulator(model)
start_time = time.time()
sim.run(0.1)
link.write(path0 + 'duty_cycle_sp', '0')

import pylab
pylab.plot(sim.data[p_t], sim.data[p_desired])
pylab.plot(sim.data[p_t], sim.data[p_q])
pylab.show()
gpl-2.0
-5,581,647,480,612,939,000
25.6
72
0.611914
false
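The control node above delegates to bench.pid.PID(...).step(position, target) from embodied_benchmarks. As a self-contained sketch of what a PID step of that shape typically computes, here is a minimal controller; the class name, gains, time step and signature are illustrative assumptions, not the embodied_benchmarks API.

class SketchPID(object):
    """Illustrative PID step: u = Kp*e + Ki*integral(e) + Kd*de/dt."""
    def __init__(self, kp, ki, kd, dt=0.001):
        self.kp, self.ki, self.kd, self.dt = kp, ki, kd, dt
        self.integral = 0.0
        self.prev_error = 0.0

    def step(self, actual, desired):
        error = desired - actual
        self.integral += error * self.dt
        derivative = (error - self.prev_error) / self.dt
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative

pid = SketchPID(2.0, 1.0, 0.0)
u = pid.step(actual=0.1, desired=0.5)  # one control step -> motor command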
mdigiorgio/lisa
libs/utils/energy_model.py
1
36419
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2016, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import namedtuple, OrderedDict from itertools import product import logging import operator import re import pandas as pd import numpy as np from devlib.utils.misc import memoized, mask_to_list from devlib import TargetError """Classes for modeling and estimating energy usage of CPU systems""" def read_multiple_oneline_files(target, glob_patterns): """ Quickly read many single-line files that match a glob pattern Finds all the files that match any of the glob patterns and, assuming that they each contain exactly 1 line of text, read them all at once. When the target or connection is slow this saves a lot of time when reading a large number of files. This will only work safely on stationary files, don't try to use it where the glob expansion will change often - for example /proc/**/autogroup would not work because /proc/ entries will likely appear & disappear while we're reading them. :param target: devlib target object to read from :param glob_pattern: Unix glob pattern matching the files to read :returns: A dictionary mapping matched paths to the values read. ``{}`` if no paths matched the globs. """ find_cmd = 'find ' + ' '.join(glob_patterns) try: paths = target.execute(find_cmd).split() except TargetError: return {} cmd = '{} | {} xargs cat'.format(find_cmd, target.busybox) contents = target.execute(cmd).splitlines() if len(contents) != len(paths): raise RuntimeError('File count mismatch while reading multiple files') return dict(zip(paths, contents)) class EnergyModelCapacityError(Exception): """Used by :meth:`EnergyModel.get_optimal_placements`""" pass class ActiveState(namedtuple('ActiveState', ['capacity', 'power'])): """Represents power and compute capacity at a given frequency :param capacity: Relative compute capacity at frequency :param power: Power usage at frequency """ def __new__(cls, capacity=None, power=None): return super(ActiveState, cls).__new__(cls, capacity, power) class _CpuTree(object): """Internal class. Abstract representation of a CPU topology. Each node contains either a single CPU or a set of child nodes. 
""" def __init__(self, cpu, children): if (cpu is None) == (children is None): raise ValueError('Provide exactly one of: cpu or children') self.parent = None self.cpu = cpu if cpu is not None: self.cpus = (cpu,) self.children = [] else: if len(children) == 0: raise ValueError('children cannot be empty') self.cpus = tuple(sorted(set().union(*[n.cpus for n in children]))) self.children = children for child in children: child.parent = self self.name = None def __repr__(self): name_bit = '' if self.name: name_bit = 'name="{}", '.format(self.name) if self.children: return '{}({}children={})'.format( self.__class__.__name__, name_bit, self.children) else: return '{}({}cpus={})'.format( self.__class__.__name__, name_bit, self.cpus) def _iter(self, include_non_leaves): for child in self.children: for child_i in child._iter(include_non_leaves): yield child_i if include_non_leaves or not self.children: yield self def iter_nodes(self): """Iterate over nodes depth-first, post-order""" return self._iter(True) def iter_leaves(self): """Iterate over leaves""" return self._iter(False) class EnergyModelNode(_CpuTree): """Describes topology and energy data for an EnergyModel. Represents a CPU topology with energy data. The active and idle state data represents the power usage of just the hardware resources of this topology level, not its children. e.g. If the node represents a cluster, the power numbers should not include power used by the CPU - that power should be included the data of the child nodes. Exactly one of ``cpu`` and ``children`` must be given. :param active_states: Dict mapping frequencies to :class:`ActiveState` values. Compute capacity data is optional for non-leaf nodes. :param idle_states: Dict mapping idle state names to power usage values :param cpu: The CPU this node represents. If provided, this is a leaf node. :type cpus: tuple(int) :param children: Non-empty list of child :class:`EnergyModelNode` objects :param name: Optional human-readable name for this node. Leaf (CPU) nodes have a default name of "cpuN" where N is the cpu number. :ivar cpus: CPUs contained in this node. Includes those of child nodes. :ivar cpu: For convenience, this holds the single CPU contained by leaf nodes. ``None`` for non-leaf nodes. """ def __init__(self, active_states, idle_states, cpu=None, children=None, name=None): super(EnergyModelNode, self).__init__(cpu, children) self._log = logging.getLogger('EnergyModel') def is_monotonic(l, decreasing=False): op = operator.ge if decreasing else operator.le return all(op(a, b) for a, b in zip(l, l[1:])) if active_states: # Sanity check for active_states's frequencies freqs = active_states.keys() if not is_monotonic(freqs): self._log.warning( 'Active states frequencies are expected to be ' 'monotonically increasing. Freqs: {}'.format(freqs)) # Sanity check for active_states's powers power_vals = [s.power for s in active_states.values()] if not is_monotonic(power_vals): self._log.warning( 'Active states powers are expected to be ' 'monotonically increasing. Values: {}'.format(power_vals)) # Sanity check for idle_states powers if idle_states: power_vals = idle_states.values() if not is_monotonic(power_vals, decreasing=True): self._log.warning( 'Idle states powers are expected to be ' 'monotonically decreasing. 
Values: {}'.format(power_vals)) if cpu is not None and not name: name = 'cpu' + str(cpu) self.name = name self.active_states = active_states self.idle_states = idle_states @property def max_capacity(self): """Compute capacity at highest frequency""" return max(s.capacity for s in self.active_states.values()) class EnergyModelRoot(EnergyModelNode): """ Convenience class for root of an EnergyModelNode tree. Just like EnergyModelNode except that ``active_states`` and ``idle_states`` aren't required. """ def __init__(self, active_states=None, idle_states=None, cpu=None, children=None, name=None): return super(EnergyModelRoot, self).__init__( active_states, idle_states, cpu, children, name) class PowerDomain(_CpuTree): """Describes the power domain hierarchy for an EnergyModel. Power domains are a description of the topological dependencies in hardware for entering idle states. "Composite" states such as cluster-sleep states require a set of CPUs to all be idle before that state can be entered. In that case those CPUs can be grouped into a power domain, and that composite state attached to the power domain. Note that cpuidle is not aware of these dependencies; they are typically handled by the platform firmware. Exactly one of ``cpu`` and ``children`` must be given. That is, leaves of the PowerDomain tree always contain exactly one CPU - each CPU is represented as being in a power domain of its own. This represents the assumption that all CPUs have at least one idle state (such as ARM WFI) that they can enter independently of other CPUs. :param idle_states: List of names of idle states for this power domain. Does not store power data - these names are used as keys into the ``idle_states`` field of :class:`EnergyModelNode` objects. :type idle_states: list(str) :param cpu: The CPU this node represents. If provided, this is a leaf node. :type cpu: int :param children: Non-empty list of child :class:`PowerDomain` objects :type children: list(PowerDomain) :ivar cpus: CPUs contained in this node. Includes those of child nodes. :type cpus: tuple(int) """ def __init__(self, idle_states, cpu=None, children=None): if idle_states is None: raise ValueError('idle_states cannot be None (but may be empty)') super(PowerDomain, self).__init__(cpu, children) self.idle_states = idle_states class EnergyModel(object): """Represents hierarchical CPU topology with power and capacity data An energy model consists of - A CPU topology, representing the physical (cache/interconnect) topology of the CPUs. Each node stores the energy usage of that node's hardware when it is in each active or idle state. They also store a compute capacity at each frequency, but this is only meaningful for leaf nodes (CPUs) and may be None at higher levels. These capacity values are relative; the maximum capacity would usually be 1024, the value of SCHED_CAPACITY_SCALE in the Linux kernel scheduler. Use EnergyModelNodes to describe this. - A power domain topology, representing the hierarchy of areas that can be powered down (idled). The power domains are a single tree. Leaf nodes must contain exactly one CPU and the root node must indirectly contain every CPU. Each power domain has a list (maybe empty) of names of idle states that that domain can enter. Use PowerDomains to describe this. - A set of frequency domains, representing groups of CPUs whose clock frequencies must be equal (probably because they share a clock). The frequency domains must be a partition of the CPUs. 
:ivar cpu_nodes: List of leaf (CPU) :class:`EnergyModelNode` :ivar cpus: List of logical CPU numbers in the system :param root_node: Root of :class:`EnergyModelNode` tree :param root_power_domain: Root of :class:`PowerDomain` tree :param freq_domains: Collection of collections of logical CPU numbers representing frequency (clock) domains. .. note:: The most signficant shortcomings of the model are: 1. Voltage domains are assumed to be congruent to frequency domains 2. Idle state power is assumed to be independent of voltage 3. Temperature is ignored entirely .. _cpu-utils: .. admonition:: ``cpu_utils``: CPU util distributions Used throughout this module: A ``cpu_utils`` is a list ``u`` where ``u[N]`` is the sum of the frequency-invariant, capacity-invariant utilization of tasks placed on CPU N. That is, the quantity represented by a CPU runqueue's util_avg in the Linux kernel scheduler's load-tracking system with EAS features enabled. The range of utilization values is 0 - :attr:`EnergyModel.capacity_scale`. This represents a static utilization, assuming that tasks don't change in size (for example representing a set of fixed periodic RT-App workloads). For workloads that change over time, a series of ``cpu_utils`` items would be needed to describe the utilization, with a distinct estimation for each item in the series. """ capacity_scale = 1024 """The relative computational capacity of the most powerful CPU at its highest available frequency. """ def __init__(self, root_node, root_power_domain, freq_domains): self.cpus = root_node.cpus if self.cpus != tuple(range(len(self.cpus))): raise ValueError('CPU IDs [{}] are sparse'.format(self.cpus)) # Check that freq_domains is a partition of the CPUs fd_intersection = set().intersection(*freq_domains) if fd_intersection: raise ValueError('CPUs {} exist in multiple freq domains'.format( fd_intersection)) fd_difference = set(self.cpus) - set().union(*freq_domains) if fd_difference: raise ValueError('CPUs {} not in any frequency domain'.format( fd_difference)) self.freq_domains = freq_domains # Check that nodes with energy data are all within a frequency domain for node in root_node.iter_nodes(): if not node.active_states or node.idle_states: continue cpu_freq_doms = [] for cpu in node.cpus: [cpu_freq_dom] = [d for d in freq_domains if cpu in d] cpu_freq_doms.append(cpu_freq_dom) if not all(d == cpu_freq_doms[0] for d in cpu_freq_doms[1:]): raise ValueError( 'Node {} (CPUs {}) ' 'has energy data and overlaps freq domains'.format( node.name, node.cpus)) def sorted_leaves(root): # Get a list of the leaf (cpu) nodes of a _CpuTree in order of the # CPU ID ret = sorted(list(root.iter_leaves()), key=lambda n: n.cpus[0]) assert all(len(n.cpus) == 1 for n in ret) return ret self.root = root_node self.cpu_nodes = sorted_leaves(root_node) self.cpu_pds = sorted_leaves(root_power_domain) assert len(self.cpu_pds) == len(self.cpu_nodes) self._log = logging.getLogger('EnergyModel') max_cap = max(n.max_capacity for n in self.cpu_nodes) if max_cap != self.capacity_scale: self._log.debug( 'Unusual max capacity (%s), overriding capacity_scale', max_cap) self.capacity_scale = max_cap def _cpus_with_capacity(self, cap): """ Helper method to find the CPUs whose max capacity equals cap """ return [c for c in self.cpus if self.cpu_nodes[c].max_capacity == cap] @property @memoized def biggest_cpus(self): """ The CPUs with the highest compute capacity at their highest frequency """ return self._cpus_with_capacity(self.capacity_scale) @property @memoized def 
littlest_cpus(self): """ The CPUs with the lowest compute capacity at their highest frequency """ min_cap = min(n.max_capacity for n in self.cpu_nodes) return self._cpus_with_capacity(min_cap) @property @memoized def is_heterogeneous(self): """ True iff CPUs do not all have the same efficiency and OPP range """ states = self.cpu_nodes[0].active_states return any(c.active_states != states for c in self.cpu_nodes[1:]) @property @memoized def cpu_groups(self): """ List of lists of CPUs who share the same active state values """ groups = [] for node in self.cpu_nodes: for group in groups: group_states = self.cpu_nodes[group[0]].active_states if node.active_states == group_states: group.append(node.cpu) break else: groups.append([node.cpu]) return groups def _guess_idle_states(self, cpus_active): def find_deepest(pd): if not any(cpus_active[c] for c in pd.cpus): if pd.parent: parent_state = find_deepest(pd.parent) if parent_state: return parent_state return pd.idle_states[-1] if len(pd.idle_states) else None return None return [find_deepest(pd) for pd in self.cpu_pds] def get_cpu_capacity(self, cpu, freq=None): """Convenience method to get the capacity of a CPU at a given frequency :param cpu: CPU to get capacity for :param freq: Frequency to get the CPU capacity at. Default is max capacity. """ if freq is None: return self.cpu_nodes[cpu].max_capacity return self.cpu_nodes[cpu].active_states[freq].capacity def guess_idle_states(self, cpus_active): """Pessimistically guess the idle states that each CPU may enter If a CPU has any tasks it is estimated that it may only enter its shallowest idle state in between task activations. If all the CPUs within a power domain have no tasks, they will all be judged able to enter that domain's deepest idle state. If any CPU in a domain has work, no CPUs in that domain are assumed to enter any domain shared state. e.g. Consider a system with - two power domains PD0 and PD1 - 4 CPUs, with CPUs [0, 1] in PD0 and CPUs [2, 3] in PD1 - 4 idle states: "WFI", "cpu-sleep", "cluster-sleep-0" and "cluster-sleep-1", where the "cluster-sleep-*" states domain states, i.e. a CPU can only enter those states when both CPUs in the domain are idle. Then here are some example inputs and outputs: :: # All CPUs idle: [0, 0, 0, 0] -> ["cluster-sleep-1", "cluster-sleep-1", "cluster-sleep-1", "cluster-sleep-1"] # All CPUs have work [1, 1, 1, 1] -> ["WFI","WFI","WFI", "WFI"] # One power domain active, the other idle [0, 0, 1, 1] -> ["cluster-sleep-1", "cluster-sleep-1", "WFI","WFI"] # One CPU active. # Note that CPU 2 has no work but is assumed to never be able to enter # any "cluster" state. [0, 0, 0, 1] -> ["cluster-sleep-1", "cluster-sleep-1", "cpu-sleep","WFI"] :param cpus_active: list where bool(cpus_active[N]) is False iff no tasks will run on CPU N. :returns: List ``ret`` where ``ret[N]`` is the name of the estimated idle state that CPU N can enter during idle periods. 
""" states = self._guess_idle_states(cpus_active) return [s or c.idle_states.keys()[0] for s, c in zip(states, self.cpu_nodes)] def _guess_freqs(self, cpu_utils): overutilized = False # Find what frequency each CPU would need if it was alone in its # frequency domain ideal_freqs = [0 for _ in self.cpus] for node in self.cpu_nodes: [cpu] = node.cpus required_cap = cpu_utils[cpu] possible_freqs = [f for f, s in node.active_states.iteritems() if s.capacity >= required_cap] if possible_freqs: ideal_freqs[cpu] = min(possible_freqs) else: # CPU cannot provide required capacity, use max freq ideal_freqs[cpu] = max(node.active_states.keys()) overutilized = True # Rectify the frequencies among domains freqs = [0 for _ in ideal_freqs] for domain in self.freq_domains: domain_freq = max(ideal_freqs[c] for c in domain) for cpu in domain: freqs[cpu] = domain_freq return freqs, overutilized def guess_freqs(self, cpu_utils): """Work out CPU frequencies required to execute a workload Find the lowest possible frequency for each CPU that provides enough capacity to satisfy the utilization, taking into account frequency domains. :param cpu_utils: Utilization distribution, see :ref:`cpu_utils <cpu-utils>` :returns: List ``ret`` where ``ret[N]`` is the frequency that CPU N must run at """ freqs, _ = self._guess_freqs(cpu_utils) return freqs def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states, combine): """Helper for estimate_from_cpu_util Like estimate_from_cpu_util but uses active time i.e. proportion of time spent not-idle in the range 0.0 - 1.0. If combine=False, return idle and active power as separate components. """ power = 0 ret = {} assert all(0.0 <= a <= 1.0 for a in cpu_active_time) for node in self.root.iter_nodes(): # Some nodes might not have energy model data, they could just be # used to group other nodes (likely the root node, for example). if not node.active_states or not node.idle_states: continue cpus = tuple(node.cpus) # For now we assume topology nodes with energy models do not overlap # with frequency domains freq = freqs[cpus[0]] assert all(freqs[c] == freq for c in cpus[1:]) # The active time of a node is estimated as the max of the active # times of its children. # This works great for the synthetic periodic workloads we use in # LISA (where all threads wake up at the same time) but is probably # no good for real workloads. active_time = max(cpu_active_time[c] for c in cpus) active_power = node.active_states[freq].power * active_time _idle_power = max(node.idle_states[idle_states[c]] for c in cpus) idle_power = _idle_power * (1 - active_time) if combine: ret[cpus] = active_power + idle_power else: ret[cpus] = {} ret[cpus]["active"] = active_power ret[cpus]["idle"] = idle_power return ret def estimate_from_cpu_util(self, cpu_utils, freqs=None, idle_states=None): """ Estimate the energy usage of the system under a utilization distribution Optionally also take freqs; a list of frequencies at which each CPU is assumed to run, and idle_states, the idle states that each CPU can enter between activations. If not provided, they will be estimated assuming an ideal selection system (i.e. perfect cpufreq & cpuidle governors). :param cpu_utils: Utilization distribution, see :ref:`cpu_utils <cpu-utils>` :param freqs: List of CPU frequencies. Got from :meth:`guess_freqs` by default. :param idle_states: List of CPU frequencies. Got from :meth:`guess_idle_states` by default. 
:returns: Dict with power in bogo-Watts (bW), with contributions from each system component keyed with a tuple of the CPUs comprising that component (i.e. :attr:EnergyModelNode.cpus) :: { (0,) : 10, (1,) : 10, (0, 1) : 5, } This represents CPUs 0 and 1 each using 10bW and their shared resources using 5bW for a total of 25bW. """ if len(cpu_utils) != len(self.cpus): raise ValueError( 'cpu_utils length ({}) must equal CPU count ({})'.format( len(cpu_utils), len(self.cpus))) if freqs is None: freqs = self.guess_freqs(cpu_utils) if idle_states is None: idle_states = self.guess_idle_states(cpu_utils) cpu_active_time = [] for cpu, node in enumerate(self.cpu_nodes): assert (cpu,) == node.cpus cap = node.active_states[freqs[cpu]].capacity cpu_active_time.append(min(float(cpu_utils[cpu]) / cap, 1.0)) return self._estimate_from_active_time(cpu_active_time, freqs, idle_states, combine=True) def get_optimal_placements(self, capacities): """Find the optimal distribution of work for a set of tasks Find a list of candidates which are estimated to be optimal in terms of power consumption, but that do not result in any CPU becoming over-utilized. If no such candidates exist, i.e. the system being modeled cannot satisfy the workload's throughput requirements, an :class:`EnergyModelCapacityError` is raised. For example, if e was an EnergyModel modeling two CPUs with capacity 1024, this error would be raised by: :: e.get_optimal_placements({"t1": 800, "t2": 800, "t3: "800"}) This estimation assumes an ideal system of selecting OPPs and idle states for CPUs. .. note:: This is a brute force search taking time exponential wrt. the number of tasks. :param capacities: Dict mapping tasks to expected utilization values. These tasks are assumed not to change; they have a single static utilization value. A set of single-phase periodic RT-App tasks is an example of a suitable workload for this model. :returns: List of ``cpu_utils`` items representing distributions of work under optimal task placements, see :ref:`cpu_utils <cpu-utils>`. Multiple task placements that result in the same CPU utilizations are considered equivalent. """ tasks = capacities.keys() num_candidates = len(self.cpus) ** len(tasks) self._log.debug( '%14s - Searching %d configurations for optimal task placement...', 'EnergyModel', num_candidates) candidates = {} excluded = [] for cpus in product(self.cpus, repeat=len(tasks)): placement = {task: cpu for task, cpu in zip(tasks, cpus)} util = [0 for _ in self.cpus] for task, cpu in placement.items(): util[cpu] += capacities[task] util = tuple(util) # Filter out candidate placements that have tasks greater than max # or that we have already determined that we cannot place. if (any(u > self.capacity_scale for u in util) or util in excluded): continue if util not in candidates: freqs, overutilized = self._guess_freqs(util) if overutilized: # This isn't a valid placement excluded.append(util) else: power = self.estimate_from_cpu_util(util, freqs=freqs) candidates[util] = sum(power.values()) if not candidates: # The system can't provide full throughput to this workload. 
raise EnergyModelCapacityError( "Can't handle workload - total cap = {}".format( sum(capacities.values()))) # Whittle down to those that give the lowest energy estimate min_power = min(p for p in candidates.itervalues()) ret = [u for u, p in candidates.iteritems() if p == min_power] self._log.debug('%14s - Done', 'EnergyModel') return ret @classmethod def _find_core_groups(cls, target): """ Read the core_siblings masks for each CPU from sysfs :param target: Devlib Target object to read masks from :returns: A list of tuples of ints, representing the partition of core siblings """ cpus = range(target.number_of_cpus) topology_base = '/sys/devices/system/cpu/' # We only care about core_siblings, but let's check *_siblings, so we # can throw an error if a CPU's thread_siblings isn't just itself, or if # there's a topology level we don't understand. # Since we might have to read a lot of files, read everything we need in # one go to avoid taking too long. mask_glob = topology_base + 'cpu**/topology/*_siblings' file_values = read_multiple_oneline_files(target, [mask_glob]) regex = re.compile( topology_base + r'cpu([0-9]+)/topology/([a-z]+)_siblings') ret = set() for path, mask_str in file_values.iteritems(): match = regex.match(path) cpu = int(match.groups()[0]) level = match.groups()[1] # mask_to_list returns the values in descending order, so we'll sort # them ascending. This isn't strictly necessary but it's nicer. siblings = tuple(sorted(mask_to_list(int(mask_str, 16)))) if level == 'thread': if siblings != (cpu,): # SMT systems aren't supported raise RuntimeError('CPU{} thread_siblings is {}. ' 'expected {}'.format(cpu, siblings, [cpu])) continue if level != 'core': # The only other levels we should expect to find are 'book' and # 'shelf', which are not used by architectures we support. raise RuntimeError( 'Unrecognised topology level "{}"'.format(level)) ret.add(siblings) # Sort core groups so that the lowest-numbered cores are first # Again, not strictly necessary, just more pleasant. return sorted(ret, key=lambda x: x[0]) @classmethod def from_target(cls, target): """ Create an EnergyModel by reading a target filesystem This uses the sysctl added by EAS pathces to exposes the cap_states and idle_states fields for each sched_group. This feature depends on CONFIG_SCHED_DEBUG, and is not upstream in mainline Linux (as of v4.11), so this method is only tested with Android kernels. The kernel doesn't have an power domain data, so this method assumes that all CPUs are totally independent wrt. idle states - the EnergyModel constructed won't be aware of the topological dependencies for entering "cluster" idle states. Assumes the energy model has two-levels (plus the root) - a level for CPUs and a level for 'clusters'. :param target: Devlib target object to read filesystem from. Must have cpufreq and cpuidle modules enabled. :returns: Constructed EnergyModel object based on the parameters reported by the target. """ if 'cpufreq' not in target.modules: raise TargetError('Requires cpufreq devlib module. Please ensure ' '"cpufreq" is listed in your target/test modules') if 'cpuidle' not in target.modules: raise TargetError('Requires cpuidle devlib module. Please ensure ' '"cpuidle" is listed in your target/test modules') def sge_path(cpu, domain, group, field): f = '/proc/sys/kernel/sched_domain/cpu{}/domain{}/group{}/energy/{}' return f.format(cpu, domain, group, field) # Read all the files we might need in one go, otherwise this will take # ages. 
sge_globs = [sge_path('**', '**', '**', 'cap_states'), sge_path('**', '**', '**', 'idle_states')] sge_file_values = read_multiple_oneline_files(target, sge_globs) if not sge_file_values: raise TargetError('Energy Model not exposed in sysfs. ' 'Check CONFIG_SCHED_DEBUG is enabled.') # These functions read the cap_states and idle_states vectors for the # first sched_group in the sched_domain for a given CPU at a given # level. That first group will include the given CPU. So # read_active_states(0, 0) will give the CPU-level active_states for # CPU0 and read_active_states(0, 1) will give the "cluster"-level # active_states for the "cluster" that contains CPU0. def read_sge_file(path): try: return sge_file_values[path] except KeyError as e: raise TargetError('No such file: {}'.format(e)) def read_active_states(cpu, domain_level): cap_states_path = sge_path(cpu, domain_level, 0, 'cap_states') cap_states_strs = read_sge_file(cap_states_path).split() # cap_states lists the capacity of each state followed by its power, # in increasing order. The `zip` call does this: # [c0, p0, c1, p1, c2, p2] -> [(c0, p0), (c1, p1), (c2, p2)] cap_states = [ActiveState(capacity=int(c), power=int(p)) for c, p in zip(cap_states_strs[0::2], cap_states_strs[1::2])] freqs = target.cpufreq.list_frequencies(cpu) return OrderedDict(zip(sorted(freqs), cap_states)) def read_idle_states(cpu, domain_level): idle_states_path = sge_path(cpu, domain_level, 0, 'idle_states') idle_states_strs = read_sge_file(idle_states_path).split() # get_states should return the state names in increasing depth order names = [s.name for s in target.cpuidle.get_states(cpu)] # idle_states is a list of power values in increasing order of # idle-depth/decreasing order of power. return OrderedDict(zip(names, [int(p) for p in idle_states_strs])) # Read the CPU-level data from sched_domain level 0 cpus = range(target.number_of_cpus) cpu_nodes = [] for cpu in cpus: node = EnergyModelNode( cpu=cpu, active_states=read_active_states(cpu, 0), idle_states=read_idle_states(cpu, 0)) cpu_nodes.append(node) # Read the "cluster" level data from sched_domain level 1 core_group_nodes = [] for core_group in cls._find_core_groups(target): node=EnergyModelNode( children=[cpu_nodes[c] for c in core_group], active_states=read_active_states(core_group[0], 1), idle_states=read_idle_states(core_group[0], 1)) core_group_nodes.append(node) root = EnergyModelRoot(children=core_group_nodes) # Use cpufreq to figure out the frequency domains freq_domains = [] remaining_cpus = set(cpus) while remaining_cpus: cpu = next(iter(remaining_cpus)) dom = target.cpufreq.get_domain_cpus(cpu) freq_domains.append(dom) remaining_cpus = remaining_cpus.difference(dom) # We don't have a way to read the power domains from sysfs (the kernel # isn't even aware of them) so we'll just have to assume each CPU is its # own power domain and all idle states are independent of each other. cpu_pds = [] for cpu in cpus: names = [s.name for s in target.cpuidle.get_states(cpu)] cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names)) root_pd=PowerDomain(children=cpu_pds, idle_states=[]) return cls(root_node=root, root_power_domain=root_pd, freq_domains=freq_domains)
apache-2.0
-8,393,402,549,132,906,000
40.385227
82
0.601444
false
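To make the relationships between the classes in this file concrete, here is a minimal sketch of building and querying a two-CPU, one-cluster model. Every capacity and power number below is invented purely for illustration, and the classes are assumed to be importable from this module; real platforms would use measured values.

from energy_model import (ActiveState, EnergyModelNode, EnergyModelRoot,
                          PowerDomain, EnergyModel)

active = {1000000: ActiveState(capacity=512, power=100),
          2000000: ActiveState(capacity=1024, power=300)}
idle = {'WFI': 5, 'cpu-sleep': 1}

cpu_nodes = [EnergyModelNode(cpu=c, active_states=dict(active),
                             idle_states=dict(idle)) for c in (0, 1)]
cluster = EnergyModelNode(children=cpu_nodes, name='cluster0',
                          active_states={1000000: ActiveState(power=10),
                                         2000000: ActiveState(power=25)},
                          idle_states={'WFI': 2, 'cpu-sleep': 0})
root = EnergyModelRoot(children=[cluster])

pds = [PowerDomain(cpu=c, idle_states=['WFI', 'cpu-sleep']) for c in (0, 1)]
root_pd = PowerDomain(children=pds, idle_states=[])

em = EnergyModel(root_node=root, root_power_domain=root_pd,
                 freq_domains=[[0, 1]])
# Estimated power in bogo-Watts, keyed by the CPUs of each component,
# e.g. (0,), (1,) and the shared (0, 1) cluster resources.
print(em.estimate_from_cpu_util([256, 0]))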
jhprinz/openpathsampling
openpathsampling/tests/testbiasfunction.py
1
15351
from __future__ import division from __future__ import print_function from __future__ import absolute_import from builtins import range from builtins import object from past.utils import old_div from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal, raises, assert_in) from nose.plugins.skip import Skip, SkipTest from .test_helpers import true_func, assert_equal_array_array, make_1d_traj import numpy as np import openpathsampling as paths from openpathsampling.bias_function import * import logging from openpathsampling import VolumeFactory as vf quiet_loggers = ["initialization", "ensemble", "netcdfplus.objects", "netcdfplus.netcdfplus", "pathmover", "netcdfplus.base"] for logger in quiet_loggers: logging.getLogger("openpathsampling."+logger).setLevel(logging.CRITICAL) class testBiasEnsembleTable(object): def setup(self): # create the network xval = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0]) self.stateA = paths.CVDefinedVolume(xval, -1.0, -0.5).named("A") self.stateB = paths.CVDefinedVolume(xval, 0.5, float("inf")).named("B") ifacesA = paths.VolumeInterfaceSet(xval, float(-1.0), [-0.5, -0.4, -0.3, -0.2]) self.network = paths.MISTISNetwork([ (self.stateA, ifacesA, self.stateB) ]) transition = self.network.transitions[(self.stateA, self.stateB)] ensembles = transition.ensembles self.xval = xval self.ifacesA = ifacesA # create the biases bias_table = {} bias_table[ensembles[0]] = 1.0 bias_table[ensembles[1]] = 0.5 bias_table[ensembles[2]] = 0.2 self.bias = BiasEnsembleTable.ratios_from_dictionary(bias_table) # samples, moves, changes traj = make_1d_traj([-0.55, -0.45, -0.35, -0.25, -0.15, -0.26, -0.36, -0.46, -0.56]) s0 = paths.Sample(replica=0, ensemble=ensembles[0], trajectory=traj) s1 = paths.Sample(replica=1, ensemble=ensembles[1], trajectory=traj) s2 = paths.Sample(replica=2, ensemble=ensembles[2], trajectory=traj) self.sample_set = paths.SampleSet([s0, s1, s2]) self.sample_set.sanity_check() move_01 = paths.EnsembleHopMover(ensembles[0], ensembles[1]) move_02 = paths.EnsembleHopMover(ensembles[0], ensembles[2]) move_12 = paths.EnsembleHopMover(ensembles[1], ensembles[2]) move_21 = paths.EnsembleHopMover(ensembles[2], ensembles[1]) move_20 = paths.EnsembleHopMover(ensembles[2], ensembles[0]) move_10 = paths.EnsembleHopMover(ensembles[1], ensembles[0]) # NOTE: all changes here are accepted self.change_01 = move_01.move(self.sample_set) self.change_02 = move_02.move(self.sample_set) self.change_12 = move_12.move(self.sample_set) self.change_21 = move_21.move(self.sample_set) self.change_20 = move_20.move(self.sample_set) self.change_10 = move_10.move(self.sample_set) # convenience lists for changes going outward vs. inward self.out_changes = [self.change_01, self.change_02, self.change_12] self.in_changes = [self.change_10, self.change_20, self.change_21] def test_bias_ensemble_new_to_old(self): # The o->n change is the denominator of the ratio. # for old_to_new, the probability of moving outerward depends on the # ratio of the probabilities of the two ensembles change_vals = { self.change_01 : 0.5, self.change_02 : 0.2, self.change_12 : old_div(0.2, 0.5), self.change_10 : old_div(1.0, 0.5), self.change_20 : old_div(1.0, 0.2), self.change_21 : old_div(0.5, 0.2) } for change in list(change_vals.keys()): test_val = min(1.0, change_vals[change]) assert_almost_equal( self.bias.probability_new_to_old(self.sample_set, change), test_val ) def test_bias_ensemble_old_to_new(self): # The n->o change is the numerator of the ratio. 
# prob of moving inward is the ratio of the interface weights (cf # test_bias_ensemble_old_to_new) change_vals = { self.change_10 : 0.5, self.change_20 : 0.2, self.change_21 : old_div(0.2, 0.5), self.change_01 : old_div(1.0, 0.5), self.change_02 : old_div(1.0, 0.2), self.change_12 : old_div(0.5, 0.2) } for change in list(change_vals.keys()): test_val = min(1.0, change_vals[change]) assert_almost_equal( self.bias.probability_old_to_new(self.sample_set, change), test_val ) def test_combo_bias(self): #TODO: we don't support this yet, but we need to at some point # test what happens if you have more than one sample in the change # if you do a multi-step bias (in the same direction), then it seems # to me that the total bias should be the same as if it were a # one-step. This is *not* true if hops are in different directions. # Then it depends on the product of the "upstream" hops ensembles = self.network.transitions[(self.stateA, self.stateB)].ensembles # all downstream move move_012 = paths.SequentialMover([ paths.EnsembleHopMover(ensembles[0], ensembles[1]), paths.EnsembleHopMover(ensembles[1], ensembles[2]) ]) change_012 = move_012.move(self.sample_set) # all upstream move move_210 = paths.SequentialMover([ paths.EnsembleHopMover(ensembles[2], ensembles[1]), paths.EnsembleHopMover(ensembles[1], ensembles[0]) ]) change_210 = move_210.move(self.sample_set) # assert_almost_equal( # self.bias.probability_old_to_new(change_210, self.sample_set), 1.0 # ) # assert_almost_equal( # self.bias.probability_new_to_old(change_210, self.sample_set), 0.2 # ) raise SkipTest def test_add_biases(self): # this is where we combine multiple biases into one ifacesA = self.ifacesA[:-1] xval2 = paths.FunctionCV(name="xB", f=lambda s : 0.5-s.xyz[0][0]) ifacesB = paths.VolumeInterfaceSet(xval2, float("-inf"), [0.0, 0.1, 0.2]) xval3 = paths.FunctionCV(name="xC", f=lambda s : s.xyz[0][0]-2.0) stateC = paths.CVDefinedVolume(self.xval, -3.0, 2.0) ifacesC = paths.VolumeInterfaceSet(xval3, -1.0, [0.0, 0.1, 0.2, 0.3]) network = paths.MISTISNetwork( [(self.stateA, ifacesA, self.stateB), (self.stateB, ifacesB, self.stateA), (stateC, ifacesC, self.stateA)], ms_outers=paths.MSOuterTISInterface.from_lambdas( {ifacesA: -0.2, ifacesB: 0.3} ) ) ens_A = network.transitions[(self.stateA, self.stateB)].ensembles ens_B = network.transitions[(self.stateB, self.stateA)].ensembles ens_C = network.transitions[(stateC, self.stateA)].ensembles ms_outer = list(network.special_ensembles['ms_outer'].keys())[0] dict_A = {ens_A[0]: 1.0, ens_A[1]: 0.5, ens_A[2]: 0.2, ms_outer: 0.1} dict_B = {ens_B[0]: 1.0, ens_B[1]: 0.6, ens_B[2]: 0.3, ms_outer: 0.15} dict_C = {ens_C[0]: 1.0, ens_C[1]: 0.8, ens_C[2]: 0.2} bias_A = BiasEnsembleTable.ratios_from_dictionary(dict_A) bias_B = BiasEnsembleTable.ratios_from_dictionary(dict_B) bias_C = BiasEnsembleTable.ratios_from_dictionary(dict_C) bias_AB = bias_A + bias_B # check the ensembles_to_ids assert_equal(len(bias_AB.ensembles_to_ids), 7) for ens in ens_A: assert_in(bias_AB.ensembles_to_ids[ens], [0, 1, 2]) for ens in ens_B: assert_in(bias_AB.ensembles_to_ids[ens], [3, 4, 5]) assert_equal(bias_AB.ensembles_to_ids[ms_outer], 6) # check values df_A = bias_A.dataframe df_B = bias_B.dataframe df_AB = bias_AB.dataframe col_A_msouter = bias_A.ensembles_to_ids[ms_outer] col_B_msouter = bias_B.ensembles_to_ids[ms_outer] col_AB_msouter = bias_AB.ensembles_to_ids[ms_outer] for ens1 in ens_A: idx_A = bias_A.ensembles_to_ids[ens1] idx_AB = bias_AB.ensembles_to_ids[ens1] for ens2 in ens_A: col_A = 
bias_A.ensembles_to_ids[ens2] col_AB = bias_AB.ensembles_to_ids[ens2] val_A = df_A.loc[idx_A, col_A] val_AB = df_AB.loc[idx_AB, col_AB] assert_equal(val_A, val_AB) for ens2 in ens_B: col_AB = bias_AB.ensembles_to_ids[ens2] assert_equal(np.isnan(df_AB.loc[idx_AB, col_AB]), True) assert_equal(df_A.loc[idx_A, col_A_msouter], df_AB.loc[idx_AB, col_AB_msouter]) assert_equal(df_A.loc[col_A_msouter, idx_A], df_AB.loc[col_AB_msouter, idx_AB]) for ens1 in ens_B: idx_B = bias_B.ensembles_to_ids[ens1] idx_AB = bias_AB.ensembles_to_ids[ens1] for ens2 in ens_B: col_B = bias_B.ensembles_to_ids[ens2] col_AB = bias_AB.ensembles_to_ids[ens2] val_B = df_B.loc[idx_B, col_B] val_AB = df_AB.loc[idx_AB, col_AB] assert_equal(val_B, val_AB) for ens2 in ens_A: col_AB = bias_AB.ensembles_to_ids[ens2] assert_equal(np.isnan(df_AB.loc[idx_AB, col_AB]), True) assert_equal(df_B.loc[idx_B, col_B_msouter], df_AB.loc[idx_AB, col_AB_msouter]) assert_equal(df_B.loc[col_B_msouter, idx_B], df_AB.loc[col_AB_msouter, idx_AB]) # just to make sure no errors raise when there are NaNs in table bias_ABC = bias_A + bias_B + bias_C class testSRTISBiasFromNetwork(object): def setup(self): xval = paths.CoordinateFunctionCV(name="xA", f=lambda s : s.xyz[0][0]) self.stateA = paths.CVDefinedVolume(xval, -1.0, -0.5).named("A") self.stateB = paths.CVDefinedVolume(xval, 0.5, float("inf")).named("B") self.ifacesA = paths.VolumeInterfaceSet(xval, -1.0, [-0.5, -0.4, -0.3, -0.2]) self.ifacesB = paths.VolumeInterfaceSet(xval, [0.5, 0.4, 0.3, 0.2], 1.0) self.tcp_A = paths.numerics.LookupFunction( ordinate=[-0.5, -0.4, -0.3, -0.2, -0.1], abscissa=[1.0, 0.5, 0.25, 0.125, 0.0625] ) self.tcp_B = paths.numerics.LookupFunction( ordinate=[0.5, 0.4, 0.3, 0.2, 0.1], abscissa=[1.0, 0.2, 0.04, 0.008, 0.0016] ) def test_bias_from_network(self): # force the TCP in network = paths.MISTISNetwork([ (self.stateA, self.ifacesA, self.stateB) ]) network.sampling_transitions[0].tcp = self.tcp_A bias = paths.SRTISBiasFromNetwork(network) transition = list(network.transitions.values())[0] # only one # check reciprocal of symmetric partners for i in range(4): for j in range(i, 4): assert_equal(bias.dataframe.loc[i, j], old_div(1.0, bias.dataframe.loc[j, i])) for i in range(len(transition.ensembles) - 1): ens_to = transition.ensembles[i] ens_from = transition.ensembles[i + 1] assert_equal(bias.bias_value(ens_from, ens_to), 0.5) for i in range(len(transition.ensembles) - 2): ens_to = transition.ensembles[i] ens_from = transition.ensembles[i + 2] assert_equal(bias.bias_value(ens_from, ens_to), 0.25) @raises(RuntimeError) def test_fail_without_tcp(self): network = paths.MISTISNetwork([ (self.stateA, self.ifacesA, self.stateB) ]) bias = paths.SRTISBiasFromNetwork(network) @raises(RuntimeError) def test_fail_without_lambdas(self): fake_ifaceA = paths.InterfaceSet(cv=self.ifacesA.cv, volumes=self.ifacesA.volumes, direction=self.ifacesA.direction) network = paths.MISTISNetwork([ (self.stateA, fake_ifaceA, self.stateB) ]) network.sampling_transitions[0].tcp = self.tcp_A bias = paths.SRTISBiasFromNetwork(network) def test_bias_from_ms_network(self): ms_outer = paths.MSOuterTISInterface.from_lambdas( {self.ifacesA : -0.1, self.ifacesB : 0.1} ) network = paths.MISTISNetwork( [(self.stateA, self.ifacesA, self.stateB), (self.stateB, self.ifacesB, self.stateA)], ms_outers=[ms_outer] ) transition_AB = None transition_BA = None for t in network.sampling_transitions: if t.stateA == self.stateA: t.tcp = self.tcp_A transition_AB = t elif t.stateA == self.stateB: t.tcp = self.tcp_B 
transition_BA = t else: print([t.stateA, t.stateB]) print([self.stateA, self.stateB]) raise RuntimeError("Weird states in test transition") bias = paths.SRTISBiasFromNetwork(network) n_ensembles = len(bias.dataframe.index) for i in range(n_ensembles): for j in range(i, n_ensembles): if not np.isnan(bias.dataframe.loc[i, j]): np.testing.assert_almost_equal( bias.dataframe.loc[i, j], old_div(1.0, bias.dataframe.loc[j, i]) ) for i in range(len(transition_AB.ensembles) - 1): ens_to = transition_AB.ensembles[i] ens_from = transition_AB.ensembles[i + 1] assert_almost_equal(bias.bias_value(ens_from, ens_to), 0.5) for i in range(len(transition_BA.ensembles) - 1): ens_to = transition_BA.ensembles[i] ens_from = transition_BA.ensembles[i + 1] assert_almost_equal(bias.bias_value(ens_from, ens_to), 0.2) for ensA in transition_AB.ensembles: for ensB in transition_BA.ensembles: assert_equal(np.isnan(bias.bias_value(ensA, ensB)), True) assert_equal(np.isnan(bias.bias_value(ensB, ensA)), True) assert_almost_equal(bias.bias_value(transition_BA.ensembles[-1], network.ms_outers[0]), old_div(5.0, 2)) assert_almost_equal(bias.bias_value(transition_AB.ensembles[-1], network.ms_outers[0]), old_div(2.0, 2)) raise SkipTest
lgpl-2.1
5,314,157,043,321,641,000
41.879888
82
0.558205
false
mortonjt/American-Gut
scripts/make_phyla_plots_AGP.py
1
11859
#!/usr/bin/env python from __future__ import division from os.path import isfile, exists from os.path import join as pjoin from os import mkdir from numpy import zeros, mean, array from argparse import ArgumentParser from biom.parse import parse_biom_table from americangut.make_phyla_plots import (map_to_2D_dict, render_barchart, summarize_common_categories, load_category_files, parse_category_files) __author__ = "Justine Debelius" __copyright__ = "Copyright 2013, The American Gut Project" __credits__ = ["Justine Debelius", "Daniel McDonald"] __license__ = "BSD" __version__ = "unversioned" __maintainer__ = "Justine Debelius" __email__ = "Justine.Debelius@colorado.edu" SAMPLE_TYPES = set(('fecal', 'oral', 'skin')) def main(otu_table, mapping_data, cat_tables, output_dir, sample_type='fecal', samples_to_plot=None, legend=False, xaxis=True, debug=False): """Creates stacked bar plots for an otu table INPUTS: otu_table -- an open OTU table mapping_data -- a tab delimited string containing the mapping data passed from the mapping file. categories -- a dictionary keying a mapping category to the corresponding biom table output_dir -- the location of the directory where output files should be saved. If this directory does not exist, it will be created. samples_to_plot -- a list of sample ids to plot. If no value is passed, then all samples in the biom table are analyzed. debug -- ignore properly handling Michael Pollan's sample OUTPUTS: A pdf of stacked taxonomy will be generated for each sample and saved in the output directory. These will follow the file name format Figure_4_<SAMPLEID>.pdf """ # Sets constants for analyzing the data LEVEL = 2 CATEGORY = 'taxonomy' NUM_TAXA = 9 NUM_CATS_TO_PLOT = 7 # Sets up file name constants FILEPREFIX = 'Figure_4_' FILE_END = '.pdf' # Sets up plotting constants COLORMAP = array([[0.8353, 0.2421, 0.3098], [0.9569, 0.4275, 0.2627], [0.9922, 0.6824, 0.3804], [0.9961, 0.8784, 0.5351], [0.9020, 0.9608, 0.5961], [0.6706, 0.8667, 0.6431], [0.4000, 0.7608, 0.6471], [0.1961, 0.5333, 0.7412], [0.3333, 0.3333, 0.3333]]) FIG_DIMS = (4.44444, 3.33333) AXIS_DIMS = array([[0.05, 0.05], [0.95, 0.95]]) # Common taxa are designated before processing to remain constant. COMMON_TAXA = [(u'k__Bacteria', u'p__Firmicutes'), (u'k__Bacteria', u'p__Bacteroidetes'), (u'k__Bacteria', u'p__Proteobacteria'), (u'k__Bacteria', u'p__Actinobacteria'), (u'k__Bacteria', u'p__Verrucomicrobia'), (u'k__Bacteria', u'p__Tenericutes'), (u'k__Bacteria', u'p__Cyanobacteria'), (u'k__Bacteria', u'p__Fusobacteria')] SKIPSET = set(('Sample', 'Average', 'MP')) # Names categories being plotted if sample_type == 'fecal': michael_pollan = '000007108.1075657' cat_list = ['You', 'Average', 'Similar Diet', ' Similar BMI', 'Same Gender', 'Similar Age', 'Michael Pollan'] order = ['Sample', 'Average', 'DIET_TYPE', 'BMI_CATEGORY', 'SEX', 'AGE_CATEGORY', 'MP'] elif sample_type == 'skin': michael_pollan = '7113.1075702' cat_list = ['You', 'Average', 'Similar Cosmetic Use', 'Same Dominant Hand', 'Same Gender', 'Same Age', 'Michael Pollan'] order = ['Sample', 'Average', 'COSMETICS_FREQUENCY', 'DOMINANT_HAND', 'SEX', 'AGE_CATEGORY', 'MP'] elif sample_type == 'oral': michael_pollan = '7109.1075688' cat_list = ['You', 'Average', 'Similar Diet', 'Flossing Frequency', 'Same Gender', 'Same Age', 'Michael Pollan'] order = ['Sample', 'Average', 'DIET_TYPE', 'FLOSSING_FREQUENCY', 'SEX', 'AGE_CATEGORY', 'MP'] else: raise ValueError('%s is not a supported sample type.' 
% sample_type) # Gets the mapping file map_dict = map_to_2D_dict(mapping_data) # Gets the category file dictionary summarized with the common categories # Generates the category file dictionary categories = parse_category_files(raw_tables=cat_tables, common_groups=COMMON_TAXA[:8], level=LEVEL, metadata=CATEGORY) # Summarizes taxonomy for the category (whole_sample_ids, whole_summary, new_common_taxa) = \ summarize_common_categories(biom_table=otu_table, level=LEVEL, common_categories=COMMON_TAXA[:8], metadata_category=CATEGORY) # Converts the final taxa to a cleaned up list # Converts final taxa to a clean list common_phyla = [] for taxon in new_common_taxa: common_phyla.append(taxon[1].strip(' p__').strip('[').strip(']')) new_common_taxa = common_phyla # Checks that the crrect sample ids are plotted if samples_to_plot is None: sample_ids = whole_sample_ids else: sample_ids = samples_to_plot # Identifies Michael Pollan's pre-ABX sample if debug: mp_sample_pos = 2 else: mp_sample_pos = whole_sample_ids.tolist().index(michael_pollan) mp_sample_taxa = whole_summary[:, mp_sample_pos] # Gets the table average table_average = mean(whole_summary, 1) # Generates a figure for each sample for idx, sample_id in enumerate(whole_sample_ids): if sample_id in sample_ids: meta_data = map_dict[sample_id] # Prealocates a numpy array to hold the data tax_array = zeros((NUM_TAXA, NUM_CATS_TO_PLOT)) # Adds preset values to the array so the first column is the sample # the second column is the average and the last column is Michael # Pollan tax_array[:, 0] = whole_summary[:, idx] tax_array[:, 1] = table_average tax_array[:, -1] = mp_sample_taxa # Adds the categories to the table in the listed order for idx, cat in enumerate(order): # Skips over undesired categories if cat in SKIPSET: continue # Gets the sample metadata mapping_key = meta_data[cat] # Pulls taxonomic summary and group descriptions tax_summary = categories[cat]['Summary'] group_descriptions = categories[cat]['Groups'].tolist() # Appends plotting tables try: mapping_col = group_descriptions.index(mapping_key) except: raise ValueError('The %s cannot be found in %s.' % (mapping_key, cat)) tax_array[:, idx] = tax_summary[:, mapping_col] # Sets up the file to save the data filename = pjoin(output_dir, '%s%s%s' % (FILEPREFIX, sample_id, FILE_END)) # Plots the data render_barchart(data_table=tax_array, x_axis=False, group_names=new_common_taxa, legend=False, sample_names=cat_list, y_axis=False, axis_dims=AXIS_DIMS, fig_dims=FIG_DIMS, file_out=filename, show_edge=False, colors=COLORMAP) # Sets up the command line interface # Creates the parser object parser = ArgumentParser(description='Creates stacked bar plots for an OTU' ' table.') parser.add_argument('-i', '--input', required=True, help='OTU table file path [REQUIRED]') parser.add_argument('-m', '--mapping', required=True, help='Mapping file path [REQUIRED]') parser.add_argument('-o', '--output', required=True, help='Path to the output directory [REQUIRED]') parser.add_argument('-c', '--categories', help='Category associations with a collapsed OTU file ' 'path. The string should be associated with a colon, for' ' example, "SEX:sex.biom,DIET_TYPE:diet.biom"') parser.add_argument('-s', '--samples_to_plot', default=None, help='Sample IDs you wish to plot. If no value is ' 'specified, all samples are plotted.') parser.add_argument('-t', '--sample_type', default='fecal', help='Specifies the sample type: fecal, oral, or skin. 
' 'DEFAULT: fecal') parser.add_argument('-d', '--debug', default=False, action='store_true', help='Ignore handling of MPs sample') if __name__ == '__main__': args = parser.parse_args() # Checks the biom table is sane if not args.input: parser.error("An input BIOM table is required.") elif not isfile(args.input): parser.error('The supplied biom table does not exist in the path.') else: import h5py f = h5py.File(args.input) otu_table = parse_biom_table(f) # Checks the mapping file is sane if not args.mapping: parser.error("An input mapping file is required.") elif not isfile(args.mapping): parser.error('he supplied file does not exist in the path') else: mapping = open(args.mapping, 'U') # Checks the output directory is sane if not args.output: parser.error("An output directory is required.") elif not exists(args.output): mkdir(args.output) output_dir = args.output # Parses the category argument if not args.categories: categories = {} else: cat_set = [c for c in args.categories.split(',')] category_fp = {c.strip().split(':')[0]: c.strip().split(':')[1] for c in cat_set} categories = load_category_files(category_files=category_fp) # Deals with the sample list if args.samples_to_plot: samples = args.samples_to_plot samples = samples.split(',') else: samples = None # Checks the sample type is sane if args.sample_type: if args.sample_type in SAMPLE_TYPES: sample_type = args.sample_type else: parser.error('%s is not a supported sample type.' % args.sample_type) else: sample_type = 'fecal' main(otu_table, mapping, output_dir=output_dir, cat_tables=categories, samples_to_plot=samples, sample_type=sample_type, debug=args.debug) # Commentary on the selection of common taxa: # # Common taxa can be calculated using the function, # identify_most_common_categories. When this was run on rounds 1, 2, and 3 of # the American Gut for fecal and all sites equally weighted, and for the HMP # for fecal only and equal weights on the fecal, skin and oral sites.
bsd-3-clause
8,155,955,832,743,465,000
37.131833
79
0.548023
false
FRC1296/CheezyDriver2016
frc971/control_loops/python/controls.py
1
5336
#!/usr/bin/python """ Control loop pole placement library. This library will grow to support many different pole placement methods. Currently it only supports direct pole placement. """ __author__ = 'Austin Schuh (austin.linux@gmail.com)' import numpy import slycot import scipy.signal.cont2discrete import glog class Error (Exception): """Base class for all control loop exceptions.""" class PolePlacementError(Error): """Exception raised when pole placement fails.""" # TODO(aschuh): dplace should take a control system object. # There should also exist a function to manipulate laplace expressions, and # something to plot bode plots and all that. def dplace(A, B, poles, alpha=1e-6): """Set the poles of (A - BF) to poles. Args: A: numpy.matrix(n x n), The A matrix. B: numpy.matrix(n x m), The B matrix. poles: array(imaginary numbers), The poles to use. Complex conjugates poles must be in pairs. Raises: ValueError: Arguments were the wrong shape or there were too many poles. PolePlacementError: Pole placement failed. Returns: numpy.matrix(m x n), K """ # See http://www.icm.tu-bs.de/NICONET/doc/SB01BD.html for a description of the # fortran code that this is cleaning up the interface to. n = A.shape[0] if A.shape[1] != n: raise ValueError("A must be square") if B.shape[0] != n: raise ValueError("B must have the same number of states as A.") m = B.shape[1] num_poles = len(poles) if num_poles > n: raise ValueError("Trying to place more poles than states.") out = slycot.sb01bd(n=n, m=m, np=num_poles, alpha=alpha, A=A, B=B, w=numpy.array(poles), dico='D') A_z = numpy.matrix(out[0]) num_too_small_eigenvalues = out[2] num_assigned_eigenvalues = out[3] num_uncontrollable_eigenvalues = out[4] K = numpy.matrix(-out[5]) Z = numpy.matrix(out[6]) if num_too_small_eigenvalues != 0: raise PolePlacementError("Number of eigenvalues that are too small " "and are therefore unmodified is %d." % num_too_small_eigenvalues) if num_assigned_eigenvalues != num_poles: raise PolePlacementError("Did not place all the eigenvalues that were " "requested. Only placed %d eigenvalues." % num_assigned_eigenvalues) if num_uncontrollable_eigenvalues != 0: raise PolePlacementError("Found %d uncontrollable eigenvlaues." % num_uncontrollable_eigenvalues) return K def c2d(A, B, dt): """Converts from continuous time state space representation to discrete time. Returns (A, B). C and D are unchanged.""" ans_a, ans_b, _, _, _ = scipy.signal.cont2discrete((A, B, None, None), dt) return numpy.matrix(ans_a), numpy.matrix(ans_b) def ctrb(A, B): """Returns the controllability matrix. This matrix must have full rank for all the states to be controllable. """ n = A.shape[0] output = B intermediate = B for i in xrange(0, n): intermediate = A * intermediate output = numpy.concatenate((output, intermediate), axis=1) return output def dlqr(A, B, Q, R): """Solves for the optimal lqr controller. x(n+1) = A * x(n) + B * u(n) J = sum(0, inf, x.T * Q * x + u.T * R * u) """ # P = (A.T * P * A) - (A.T * P * B * numpy.linalg.inv(R + B.T * P *B) * (A.T * P.T * B).T + Q P, rcond, w, S, T = slycot.sb02od( n=A.shape[0], m=B.shape[1], A=A, B=B, Q=Q, R=R, dico='D') F = numpy.linalg.inv(R + B.T * P *B) * B.T * P * A return F def kalman(A, B, C, Q, R): """Solves for the steady state kalman gain and covariance matricies. Args: A, B, C: SS matricies. Q: The model uncertantity R: The measurement uncertainty Returns: KalmanGain, Covariance. 
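    Example (an illustrative sketch, not part of the original source; it
    assumes A, B, C, Q, R are already-defined numpy matrices of compatible
    sizes and that slycot is available):
      K, P = kalman(A, B, C, Q, R)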
""" I = numpy.matrix(numpy.eye(Q.shape[0])) Z = numpy.matrix(numpy.zeros(Q.shape[0])) n = A.shape[0] m = C.shape[0] controllability_rank = numpy.linalg.matrix_rank(ctrb(A.T, C.T)) if controllability_rank != n: glog.warning('Observability of %d != %d, unobservable state', controllability_rank, n) # Compute the steady state covariance matrix. P_prior, rcond, w, S, T = slycot.sb02od(n=n, m=m, A=A.T, B=C.T, Q=Q, R=R, dico='D') S = C * P_prior * C.T + R K = numpy.linalg.lstsq(S.T, (P_prior * C.T).T)[0].T P = (I - K * C) * P_prior return K, P def TwoStateFeedForwards(B, Q): """Computes the feed forwards constant for a 2 state controller. This will take the form U = Kff * (R(n + 1) - A * R(n)), where Kff is the feed-forwards constant. It is important that Kff is *only* computed off the goal and not the feed back terms. Args: B: numpy.Matrix[num_states, num_inputs] The B matrix. Q: numpy.Matrix[num_states, num_states] The Q (cost) matrix. Returns: numpy.Matrix[num_inputs, num_states] """ # We want to find the optimal U such that we minimize the tracking cost. # This means that we want to minimize # (B * U - (R(n+1) - A R(n)))^T * Q * (B * U - (R(n+1) - A R(n))) # TODO(austin): This doesn't take into account the cost of U return numpy.linalg.inv(B.T * Q * B) * B.T * Q.T
bsd-2-clause
-1,647,065,824,229,413,000
29.843931
95
0.616004
false
dymkowsk/mantid
scripts/HFIRPowderReduction/HfirPDReductionGUI.py
2
92034
#pylint: disable=invalid-name, relative-import, too-many-lines,too-many-instance-attributes,too-many-arguments ################################################################################ # Main class for HFIR powder reduction GUI # Key word for future developing: FUTURE, NEXT, REFACTOR, RELEASE 2.0 ################################################################################ from __future__ import (absolute_import, division, print_function) from six.moves import range import numpy import os try: import urllib.request as urllib except ImportError: import urllib from .ui_MainWindow import Ui_MainWindow #import line for the UI python class from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s import mantid import mantidqtpython as mqt from . import HfirPDReductionControl #----- default configuration --------------- DEFAULT_SERVER = 'http://neutron.ornl.gov/user_data' DEFAULT_INSTRUMENT = 'hb2a' DEFAULT_WAVELENGTH = 2.4100 #------------------------------------------- class EmptyError(Exception): """ Exception for finding empty input for integer or float """ def __init__(self, value): """ Init """ Exception.__init__(self) self.value = value def __str__(self): return repr(self.value) class MultiScanTabState(object): """ Description of the state of the multi-scan-tab is in """ NO_OPERATION = 0 RELOAD_DATA = 1 REDUCE_DATA = 2 def __init__(self): """ Initialization :return: """ self._expNo = -1 self._scanList = [] self._xMin = None self._xMax = None self._binSize = 0 self._unit = '' self._plotRaw = False self._useDetEfficiencyCorrection = False self._excludeDetectors = [] def compare_state(self, tab_state): """ Compare this tab state and another tab state :param tab_state: :return: """ if isinstance(tab_state, MultiScanTabState) is False: raise NotImplementedError('compare_state must have MultiScanTabStatus as input.') if self._expNo != tab_state.getExpNumber() or self._scanList != tab_state.getScanList: return self.RELOAD_DATA for attname in self.__dict__.keys(): if self.__getattribute__(attname) != tab_state.__getattribute__(attname): return self.REDUCE_DATA return self.NO_OPERATION def getExpNumber(self): """ Get experiment number :return: """ return self._expNo def getScanList(self): """ Get the list of scans :return: """ return self._scanList[:] #pyline: disable=too-many-arguments def setup(self, exp_no, scan_list, min_x, max_x, bin_size, unit, raw, correct_det_eff, exclude_dets): """ Set up the object :param exp_no: :param scan_list: :param min_x: :param max_x: :param bin_size: :param unit: :param raw: :param correct_det_eff: :param exclude_dets: :return: """ self._expNo = int(exp_no) if isinstance(scan_list, list) is False: raise NotImplementedError('Scan_List must be list!') self._scanList = scan_list self._xMin = min_x self._xMax = max_x self._binSize = float(bin_size) self._unit = str(unit) self._plotRaw = raw self._useDetEfficiencyCorrection = correct_det_eff self._excludeDetectors = exclude_dets return #pylint: disable=too-many-public-methods,too-many-branches,too-many-locals,too-many-statements class MainWindow(QtGui.QMainWindow): """ Class of Main Window (top) """ # Copy to ui.setupUI # # Version 3.0 + Import for ui_MainWindow.py # from MplFigureCanvas import Qt4MplCanvas # # Replace 'self.graphicsView = QtGui.QtGraphicsView' with the following # self.graphicsView = Qt4MplCanvas(self.centralwidget) # self.mainplot = self.graphicsView.getPlot() def __init__(self, parent=None): """ Initialization and set up """ 
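        # Set-up overview (summary of the steps below): (1) initialize the
        # QMainWindow base class and build the Qt Designer UI, (2) wire the
        # menu, tab and canvas widgets to their event handlers, (3) attach
        # integer/double validators to the numeric line edits, (4) apply the
        # default widget states and data-source settings, and (5) create the
        # HFIRPDRedControl object and the bookkeeping members used by the
        # plotting tabs.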
# Base class QtGui.QMainWindow.__init__(self,parent) # UI Window (from Qt Designer) self.ui = Ui_MainWindow() self.ui.setupUi(self) # Define gui-event handling # menu self.connect(self.ui.actionQuit, QtCore.SIGNAL('triggered()'), self.doExist) self.connect(self.ui.actionFind_Help, QtCore.SIGNAL('triggered()'), self.doHelp) # main self.connect(self.ui.comboBox_wavelength, QtCore.SIGNAL('currentIndexChanged(int)'), self.doUpdateWavelength) self.connect(self.ui.pushButton_browseExcludedDetFile, QtCore.SIGNAL('clicked()'), self.doBrowseExcludedDetetorFile) self.connect(self.ui.checkBox_useDetExcludeFile, QtCore.SIGNAL('stateChanged(int)'), self.do_enable_excluded_dets) # tab 'Raw Detectors' self.connect(self.ui.pushButton_plotRaw, QtCore.SIGNAL('clicked()'), self.doPlotRawPtMain) self.connect(self.ui.pushButton_ptUp, QtCore.SIGNAL('clicked()'), self.do_plot_raw_pt_prev) self.connect(self.ui.pushButton_ptDown, QtCore.SIGNAL('clicked()'), self.doPlotRawPtNext) self.connect(self.ui.pushButton_clearRawDets, QtCore.SIGNAL('clicked()'), self.doClearRawDetCanvas) # tab 'Individual Detectors' self.connect(self.ui.pushButton_plotIndvDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetMain) self.connect(self.ui.pushButton_plotPrevDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetPrev) self.connect(self.ui.pushButton_plotNextDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetNext) self.connect(self.ui.pushButton_clearCanvasIndDet, QtCore.SIGNAL('clicked()'), self.doClearIndDetCanvas) self.connect(self.ui.pushButton_plotLog , QtCore.SIGNAL('clicked()'), self.do_plot_sample_log) # tab 'Normalized' self.connect(self.ui.pushButton_loadData, QtCore.SIGNAL('clicked()'), self.doLoadData) self.connect(self.ui.pushButton_prevScan, QtCore.SIGNAL('clicked()'), self.doLoadReduceScanPrev) self.connect(self.ui.pushButton_nextScan, QtCore.SIGNAL('clicked()'), self.doLoadReduceScanNext) self.connect(self.ui.pushButton_unit2theta, QtCore.SIGNAL('clicked()'), self.doReduce2Theta) self.connect(self.ui.pushButton_unitD, QtCore.SIGNAL('clicked()'), self.doReduceDSpacing) self.connect(self.ui.pushButton_unitQ, QtCore.SIGNAL('clicked()'), self.doReduceQ) self.connect(self.ui.pushButton_saveData, QtCore.SIGNAL('clicked()'), self.doSaveData) self.connect(self.ui.pushButton_clearTab2Canvas, QtCore.SIGNAL('clicked()'), self.doClearCanvas) # tab 'Multiple Scans' self.connect(self.ui.pushButton_loadMultData, QtCore.SIGNAL('clicked()'), self.doLoadSetData) self.connect(self.ui.pushButton_mscanBin, QtCore.SIGNAL('clicked()'), self.doReduceSetData) self.connect(self.ui.pushButton_mergeScans, QtCore.SIGNAL('clicked()'), self.doMergeScans) self.connect(self.ui.pushButton_viewMScan1D, QtCore.SIGNAL('clicked()'), self.doMergeScanView1D) self.connect(self.ui.pushButton_view2D, QtCore.SIGNAL('clicked()'), self.doMergeScanView2D) self.connect(self.ui.pushButton_viewMerge, QtCore.SIGNAL('clicked()'), self.doMergeScanViewMerged) self.connect(self.ui.pushButton_clearMultCanvas, QtCore.SIGNAL('clicked()'), self.doClearMultiRunCanvas) self.connect(self.ui.pushButton_saveAllIndScans, QtCore.SIGNAL('clicked()'), self.doSaveMultipleScans) self.connect(self.ui.pushButton_saveMerge, QtCore.SIGNAL('clicked()'), self.doSaveMergedScan) self.connect(self.ui.pushButton_plotRawMultiScans, QtCore.SIGNAL('clicked()'), self.do_convert_plot_multi_scans) # tab 'Vanadium' self.connect(self.ui.pushButton_stripVanPeaks, QtCore.SIGNAL('clicked()'), self.doStripVandiumPeaks) self.connect(self.ui.pushButton_saveVanRun, QtCore.SIGNAL('clicked()'), self.doSaveVanRun) 
self.connect(self.ui.pushButton_rebin2Theta, QtCore.SIGNAL('clicked()'), self.doReduceVanadium2Theta) self.connect(self.ui.pushButton_smoothVanData, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumData) self.connect(self.ui.pushButton_applySmooth, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumApply) self.connect(self.ui.pushButton_undoSmooth, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumUndo) # tab 'Advanced Setup' self.connect(self.ui.pushButton_browseCache, QtCore.SIGNAL('clicked()'), self.doBrowseCache) self.connect(self.ui.radioButton_useServer, QtCore.SIGNAL('clicked()'), self.doChangeSrcLocation) self.connect(self.ui.radioButton_useLocal, QtCore.SIGNAL('clicked()'), self.doChangeSrcLocation) self.connect(self.ui.pushButton_browseLocalSrc, QtCore.SIGNAL('clicked()'), self.doBrowseLocalDataSrc) self.connect(self.ui.pushButton_chkServer, QtCore.SIGNAL('clicked()'), self.doCheckSrcServer) # Define signal-event handling # define event handlers for matplotlib canvas self.ui.graphicsView_mergeRun.canvas.mpl_connect('button_press_event', self.on_mouseDownEvent) self.ui.graphicsView_mergeRun.canvas.mpl_connect('motion_notify_event', self.on_mouseMotion) # Widget type definition validator0 = QtGui.QIntValidator(self.ui.lineEdit_expNo) validator0.setBottom(1) self.ui.lineEdit_expNo.setValidator(validator0) validator1 = QtGui.QIntValidator(self.ui.lineEdit_expNo) validator1.setBottom(1) self.ui.lineEdit_scanNo.setValidator(validator1) validator2 = QtGui.QDoubleValidator(self.ui.lineEdit_wavelength) validator2.setBottom(0.) self.ui.lineEdit_wavelength.setValidator(validator2) validator3 = QtGui.QDoubleValidator(self.ui.lineEdit_xmin) validator3.setBottom(0.) self.ui.lineEdit_xmin.setValidator(validator3) validator4 = QtGui.QDoubleValidator(self.ui.lineEdit_xmax) validator4.setBottom(0.) self.ui.lineEdit_xmax.setValidator(validator4) validator5 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize) validator5.setBottom(0.) self.ui.lineEdit_binsize.setValidator(validator5) validator6 = QtGui.QDoubleValidator(self.ui.lineEdit_ptNo) validator6.setBottom(0) self.ui.lineEdit_ptNo.setValidator(validator6) validator7 = QtGui.QDoubleValidator(self.ui.lineEdit_detID) validator7.setBottom(0) self.ui.lineEdit_detID.setValidator(validator7) validator8 = QtGui.QDoubleValidator(self.ui.lineEdit_min2Theta) validator8.setBottom(0.) self.ui.lineEdit_min2Theta.setValidator(validator8) validator9 = QtGui.QDoubleValidator(self.ui.lineEdit_max2Theta) validator9.setBottom(0.) self.ui.lineEdit_max2Theta.setValidator(validator9) validator10 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize2Theta) validator10.setBottom(0.) self.ui.lineEdit_binsize2Theta.setValidator(validator10) validator11 = QtGui.QIntValidator(self.ui.lineEdit_scanStart) validator11.setBottom(1) self.ui.lineEdit_scanStart.setValidator(validator11) validator12 = QtGui.QIntValidator(self.ui.lineEdit_scanEnd) validator12.setBottom(1) self.ui.lineEdit_scanEnd.setValidator(validator12) validator13 = QtGui.QDoubleValidator(self.ui.lineEdit_normalizeMonitor) validator13.setBottom(0.) self.ui.lineEdit_normalizeMonitor.setValidator(validator13) validator14 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMinX) validator14.setBottom(0.) self.ui.lineEdit_mergeMinX.setValidator(validator14) validator15 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMaxX) validator15.setBottom(0.) self.ui.lineEdit_mergeMaxX.setValidator(validator15) validator16 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeBinSize) validator16.setBottom(0.) 
self.ui.lineEdit_mergeBinSize.setValidator(validator16) # Get initial setup # RELEASE 2.0 - This part will be implemented soon as default configuration is made # Mantid configuration self._instrument = str(self.ui.comboBox_instrument.currentText()) # UI widgets setup self.ui.comboBox_outputFormat.addItems(['Fullprof']) # Supports Fullprof only now, 'GSAS', 'Fullprof+GSAS']) # RELEASE 2.0 : Need to disable some widgets... consider to refactor the code self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) self.ui.checkBox_useDetExcludeFile.setChecked(True) self.ui.comboBox_wavelength.setCurrentIndex(0) self.ui.lineEdit_wavelength.setText('2.41') self.ui.pushButton_unit2theta.setText(r'$2\theta$') # vanadium spectrum smooth parameters self.ui.lineEdit_smoothParams.setText('20,2') # Set up data source self._serverAddress = DEFAULT_SERVER self._srcFromServer = True self._localSrcDataDir = None self._srcAtLocal = False self._currUnit = '2theta' # Workspaces self._myControl = HfirPDReductionControl.HFIRPDRedControl() # Interactive graphics self._viewMerge_X = None self._viewMerge_Y = None # Control of plots: key = canvas, value = list of 2-integer-tuple (expno, scanno) self._tabLineDict = {} self._tabBinParamDict = {} for key in [2]: self._tabLineDict[key] = [] for key in [2, 3, 4]: self._tabBinParamDict[key] = [None, None, None] self._lastMergeLabel = "" self._lastMergeIndex = -1 self._expNo = None self._scanNo = None self._detID = None self._indvXLabel = None self._rawDetExpNo = None self._rawDetScanNo = None self._rawDetPlotMode = None self._rawDetPtNo = None self._indvDetCanvasMode = 'samplelog' # Multiple scan tab self._multiScanExp = None self._multiScanList = [] #help self.assistantProcess = QtCore.QProcess(self) # pylint: disable=protected-access self.collectionFile=os.path.join(mantid._bindir,'../docs/qthelp/MantidProject.qhc') version = ".".join(mantid.__version__.split(".")[:2]) self.qtUrl='qthelp://org.sphinx.mantidproject.'+version+'/doc/interfaces/HFIRPowderReduction.html' self.externalUrl='http://docs.mantidproject.org/nightly/interfaces/HFIRPowderReduction.html' # Initial setup for tab self.ui.tabWidget.setCurrentIndex(0) cache_dir = str(self.ui.lineEdit_cache.text()).strip() if len(cache_dir) == 0 or os.path.exists(cache_dir) is False: invalid_cache = cache_dir if False: cache_dir = os.path.expanduser('~') else: cache_dir = os.getcwd() self.ui.lineEdit_cache.setText(cache_dir) self._logWarning("Cache directory %s is not valid. " "Using current workspace directory %s as cache." % (invalid_cache, cache_dir)) # Get on hold of raw data file useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() if useserver == uselocal: self._logWarning("It is logically wrong to set up (1) neither server or local dir to " "access data or (2) both server and local dir to retrieve data. 
" "As default, it is set up to download data from server.") useserver = True uselocal = False self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) # ENDIF #register startup mantid.UsageService.registerFeatureUsage("Interface","HfirPowderReduction",False) return #-- Event Handling ---------------------------------------------------- def doBrowseCache(self): """ Pop out a dialog to let user specify the directory to cache downloaded data """ # home directory homedir = str(self.ui.lineEdit_cache.text()).strip() if len(homedir) > 0 and os.path.exists(homedir): home = homedir else: home = os.getcwd() # pop out a dialog dirs = str(QtGui.QFileDialog.getExistingDirectory(self,'Get Directory',home)) # set to line edit if dirs != home: self.ui.lineEdit_cache.setText(dirs) return def doBrowseExcludedDetetorFile(self): """ Browse excluded detector's file Return :: None """ # Get file name filefilter = "Text (*.txt);;Data (*.dat);;All files (*)" curDir = os.getcwd() excldetfnames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', curDir, filefilter) try: excldetfname = excldetfnames[0] self.ui.lineEdit_excludedDetFileName.setText(excldetfname) except IndexError: # return if there is no file selected return # Parse det exclusion file print("Detector exclusion file name is %s." % (excldetfname)) excludedetlist, errmsg = self._myControl.parseExcludedDetFile('HB2A', excldetfname) if len(errmsg) > 0: self._logError(errmsg) textbuf = "" for detid in excludedetlist: textbuf += "%d," % (detid) if len(textbuf) > 0: textbuf = textbuf[:-1] self.ui.lineEdit_detExcluded.setText(textbuf) # ENDIF return def doBrowseLocalDataSrc(self): """ Browse local data storage """ msg = "Browse local data storage location. Implement ASAP" QtGui.QMessageBox.information(self, "Click!", msg) return def doChangeSrcLocation(self): """ Source file location is changed """ useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() print("Use Server: ", useserver) print("Use Local : ", uselocal) if (useserver is True and uselocal is True) or \ (useserver is False and uselocal is False): raise NotImplementedError("Impossible for radio buttons") self._srcAtLocal = uselocal self._srcFromServer = useserver if uselocal is True: self.ui.lineEdit_dataIP.setDisabled(True) self.ui.pushButton_chkServer.setDisabled(True) self.ui.lineEdit_localSrc.setDisabled(False) self.ui.pushButton_browseLocalSrc.setDisabled(False) else: self.ui.lineEdit_dataIP.setDisabled(False) self.ui.pushButton_chkServer.setDisabled(False) self.ui.lineEdit_localSrc.setDisabled(True) self.ui.pushButton_browseLocalSrc.setDisabled(True) return def doCheckSrcServer(self): """" Check source data server's availability """ msg = "Check source data server! 
Implement ASAP" QtGui.QMessageBox.information(self, "Click!", msg) return def doClearCanvas(self): """ Clear canvas """ itab = self.ui.tabWidget.currentIndex() if itab == 2: self.ui.graphicsView_reducedData.clearAllLines() self._tabLineDict[itab] = [] return def doClearIndDetCanvas(self): """ Clear the canvas in tab 'Individual Detector' and current plotted lines in managing dictionary """ # Clear all lines on canvas self.ui.graphicsView_indvDet.clearAllLines() # Remove their references in dictionary if self.ui.graphicsView_indvDet in self._tabLineDict: self._tabLineDict[self.ui.graphicsView_indvDet] = [] # Reset colur schedule self.ui.graphicsView_indvDet.resetLineColorStyle() return def doClearMultiRunCanvas(self): """ Clear the canvas in tab 'Multiple Run' This canvas is applied to both 1D and 2D image. Clear-all-lines might be not enough to clear 2D image """ self.ui.graphicsView_mergeRun.clearCanvas() return def doClearRawDetCanvas(self): """ Clear the canvas in tab 'Raw Detector': only need to clear lines """ self.ui.graphicsView_Raw.clearAllLines() self._tabLineDict[self.ui.graphicsView_Raw] = [] return def doClearVanadiumCanvas(self): """ Clear the canvas in tab 'Vanadium' """ self.ui.graphicsView_vanPeaks.clearAllLines() return def doExist(self): """ Exist the application """ clearcache = self.ui.checkBox_delCache.isChecked() if clearcache is True: urllib.delAllFile(self._cache) self.close() return def doHelp(self): """ Show help Copied from DGSPlanner """ self.assistantProcess.close() self.assistantProcess.waitForFinished() helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator() helpapp += 'assistant' args = ['-enableRemoteControl', '-collectionFile',self.collectionFile,'-showUrl',self.qtUrl] if os.path.isfile(helpapp) and os.path.isfile(self.collectionFile): self.assistantProcess.close() self.assistantProcess.waitForFinished() self.assistantProcess.start(helpapp, args) print("Show help from (app) ", helpapp) else: mqt.MantidQt.API.MantidDesktopServices.openUrl(QtCore.QUrl(self.externalUrl)) print("Show help from (url)", QtCore.QUrl(self.externalUrl)) return def doLoadData(self, exp=None, scan=None): """ Load and reduce data It does not support for tab 'Advanced Setup' For tab 'Raw Detector' and 'Individual Detector', this method will load data to MDEventWorkspaces For tab 'Normalized' and 'Vanadium', this method will load data to MDEVentWorkspaces but NOT reduce to single spectrum """ # Kick away unsupported tabs itab = self.ui.tabWidget.currentIndex() tabtext = str(self.ui.tabWidget.tabText(itab)) print("[DB] Current active tab is No. %d as %s." % (itab, tabtext)) # Rule out unsupported tab if itab == 5: # 'advanced' msg = "Tab %s does not support 'Load Data'. Request is ambiguous." % tabtext QtGui.QMessageBox.information(self, "Click!", msg) return # Get exp number and scan number if isinstance(exp, int) is True and isinstance(scan, int) is True: # use input expno = exp scanno = scan else: # read from GUI try: expno, scanno = self._uiGetExpScanNumber() self._logDebug("Attending to load Exp %d Scan %d." % (expno, scanno)) except NotImplementedError as ne: self._logError("Error to get Exp and Scan due to %s." % (str(ne))) return # ENDIF # Form data file name and download data status, datafilename = self._uiDownloadDataFile(exp=expno, scan=scanno) if status is False: self._logError("Unable to download or locate local data file for Exp %d \ Scan %d." 
% (expno, scanno)) # ENDIF(status) # (Load data for tab 0, 1, 2 and 4) if itab not in [0, 1, 2, 3, 4]: # Unsupported Tabs: programming error! errmsg = "%d-th tab should not get this far.\n"%(itab) errmsg += 'GUI has been changed, but the change has not been considered! iTab = %d' % (itab) raise NotImplementedError(errmsg) # Load SPICE data to raw table (step 1) try: execstatus = self._myControl.loadSpicePDData(expno, scanno, datafilename) if execstatus is False: cause = "Load data failed." else: cause = None except NotImplementedError as ne: execstatus = False cause = str(ne) # END-TRY-EXCEPT # Return as failed to load data if execstatus is False: self._logError(cause) return # Obtain the correction file names and wavelength from SPICE file wavelengtherror = False errmsg = "" localdir = os.path.dirname(datafilename) try: status, returnbody = self._myControl.retrieveCorrectionData(instrument='HB2A', exp=expno, scan=scanno, localdatadir=localdir) except NotImplementedError as e: errmsg = str(e) if errmsg.count('m1') > 0: # error is about wavelength status = False wavelengtherror = True else: # other error raise e # ENDTRY if status is True: autowavelength = returnbody[0] vancorrfname = returnbody[1] excldetfname = returnbody[2] if vancorrfname is not None: self.ui.lineEdit_vcorrFileName.setText(vancorrfname) if excldetfname is not None: self.ui.lineEdit_excludedDetFileName.setText(excldetfname) else: autowavelength = None vancorrfname = None excldetfname = None # ENDIF # Set wavelength to GUI except 'multiple scans' if autowavelength is None: # unable to get wavelength from SPICE data self.ui.comboBox_wavelength.setCurrentIndex(4) if wavelengtherror is True: self.ui.lineEdit_wavelength.setText(errmsg) else: self.ui.lineEdit_wavelength.setText(self.ui.comboBox_wavelength.currentText()) self._myControl.setWavelength(expno, scanno, wavelength=None) else: # get wavelength from SPICE data. set value to GUI self.ui.lineEdit_wavelength.setText(str(autowavelength)) allowedwavelengths = [2.41, 1.54, 1.12] numitems = self.ui.comboBox_wavelength.count() good = False for ic in range(numitems-1): if abs(autowavelength - allowedwavelengths[ic]) < 0.01: good = True self.ui.comboBox_wavelength.setCurrentIndex(ic) # ENDFOR if good is False: self.ui.comboBox_wavelength.setCurrentIndex(numitems-1) # ENDIF self._myControl.setWavelength(expno, scanno, wavelength=autowavelength) # ENDIFELSE # Optionally obtain and parse det effecient file if self.ui.checkBox_useDetEffCorr.isChecked() is True: # Apply detector efficiency correction if vancorrfname is None: # browse vanadium correction file filefilter = "Text (*.txt);;Data (*.dat);;All files (*)" curDir = os.getcwd() vancorrfnames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', curDir, filefilter) if len(vancorrfnames) > 0: vancorrfname = vancorrfnames[0] self.ui.lineEdit_vcorrFileName.setText(str(vancorrfname)) else: self._logError("User does not specify any vanadium correction file.") self.ui.checkBox_useDetEffCorr.setChecked(False) # ENDIF-len() # ENDIF vancorrfname # Parse if it is not None if vancorrfname is not None: detefftablews, errmsg = self._myControl.parseDetEffCorrFile('HB2A', vancorrfname) if detefftablews is None: print("Parsing detectors efficiency file error: %s." 
% (errmsg)) else: detefftablews = None # ENDIF else: # Not chosen to apply detector efficiency correction:w detefftablews = None # ENDIF # Parse SPICE data to MDEventWorkspaces try: print("Det Efficiency Table WS: ", str(detefftablews)) execstatus = self._myControl.parseSpiceData(expno, scanno, detefftablews) if execstatus is False: cause = "Parse data failed." else: cause = None except NotImplementedError as e: execstatus = False cause = str(e) # END-TRY-EXCEPT-FINALLY # Return if data parsing is error if execstatus is False: self._logError(cause) return # Optionally parse detector exclusion file and set to line text if excldetfname is not None: excludedetlist, errmsg = self._myControl.parseExcludedDetFile('HB2A', excldetfname) textbuf = "" for detid in excludedetlist: textbuf += "%d," % (detid) if len(textbuf) > 0: textbuf = textbuf[:-1] self.ui.lineEdit_detExcluded.setText(textbuf) # ENDIF # Set up some widgets for raw detector data. Won't be applied to tab 3 if itab != 3: floatsamplelognamelist = self._myControl.getSampleLogNames(expno, scanno) self.ui.comboBox_indvDetXLabel.clear() self.ui.comboBox_indvDetXLabel.addItem("2theta/Scattering Angle") self.ui.comboBox_indvDetXLabel.addItems(floatsamplelognamelist) self.ui.comboBox_indvDetYLabel.clear() self.ui.comboBox_indvDetYLabel.addItems(floatsamplelognamelist) return True def doLoadSetData(self): """ Load a set of data This is the first step of doing multiple scans processing """ # Get inputs for exp number and scans try: rtup = self._uiGetExpScanTabMultiScans() expno = rtup[0] scanlist = rtup[1] except NotImplementedError as nie: self._logError("Unable to load data set in multiple scans due to %s." % (str(nie))) # Load and reduce data loadstatus = True for scan in sorted(scanlist): tempstatus = self.doLoadData(expno, scan) if tempstatus is False: self.ui.label_mergeMessage.setText('Error to load Exp %d Scan %d.'%(expno, scan)) loadstatus = False else: message = 'Loaded Exp %d Scan %d.' % (expno, scan) self.ui.label_mergeMessage.setText(message) # ENDFOR # Load status if loadstatus is True: self.ui.label_mergeMessage.setText('All data files are loaded') else: self.ui.label_mergeMessage.setText('Not all data files are loaded') # Wave length haswavelength = True for scan in scanlist: if self._myControl.getWavelength(expno, scan) is None: self._logNotice("Exp %d Scan %d has no wavelength set up." % (expno, scan)) haswavelength = False break # ENDFOR # Set unit box if haswavelength is True: self.ui.comboBox_mscanUnit.clear() self.ui.comboBox_mscanUnit.addItems(['2theta', 'dSpacing', 'Momentum Transfer (Q)']) else: self.ui.comboBox_mscanUnit.clear() self.ui.comboBox_mscanUnit.addItems(['2theta']) return def doLoadReduceScanPrev(self): """ Load and reduce previous scan for tab 'Normalized' """ # Reduce scan number by 1 try: scanno = int(self.ui.lineEdit_scanNo.text()) except ValueError: self._logError("Either Exp No or Scan No is not set up right as integer.") return else: scanno = scanno - 1 if scanno < 1: self._logWarning("Scan number is 1 already. 
Cannot have previous scan") return self.ui.lineEdit_scanNo.setText(str(scanno)) # Load data self.ui.lineEdit_scanNo.setText(str(scanno)) self.doLoadData() # Reduce data self._uiReducePlotNoramlized(self._currUnit) return def doLoadReduceScanNext(self): """ Load and reduce next scan for tab 'Normalized' """ # Advance scan number by 1 try: scanno = int(self.ui.lineEdit_scanNo.text()) except ValueError: self._logError("Either Exp No or Scan No is not set up right as integer.") return False else: scanno = scanno + 1 if scanno < 1: self._logWarning("Scan number is 1 already. Cannot have previous scan") return False # Load data self.ui.lineEdit_scanNo.setText(str(scanno)) execstatus = self.doLoadData() print("[DB] Load data : ", execstatus) # Reduce data self._uiReducePlotNoramlized(self._currUnit) return def doMergeScans(self): """ Merge several scans for tab 'merge' """ # Get exp number and list of scans try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as ne: self._logError(str(ne)) return False # Check whether the wavelengths are same to merge try: wl_list = [] for scanno in scanlist: print("Exp %d Scan %d. Wavelength = %s." % (expno, scanno, str(self._myControl.getWavelength(expno, scanno)))) wl_list.append(float(self._myControl.getWavelength(expno, scanno))) wl_list = sorted(wl_list) min_wl = wl_list[0] max_wl = wl_list[-1] if max_wl - min_wl > 1.0: self._logWarning("All scans do not have same wavelengths!") except TypeError: self._logError('Not all scans have wavelength set up. Unable to merge scans.') return # Check! try: unit = str(self.ui.comboBox_mscanUnit.currentText()) xmin, binsize, xmax = self._uiGetBinningParams(itab=3) #wavelength = min_wl mindex = self._myControl.mergeReduceSpiceData(expno, scanlist, unit, xmin, xmax, binsize) except Exception as e: raise e label = "Exp %d, Scan %s." 
% (expno, str(scanlist)) self._plotMergedReducedData(mindex, label) self._lastMergeIndex = mindex self._lastMergeLabel = label return def doMergeScanView1D(self): """ Change the multiple runs to 1D """ # Highlight the button's color self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: yellow; color: red;}') self.ui.pushButton_view2D.setEnabled(True) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: white; color: gray;}') self.ui.pushButton_viewMScan1D.setEnabled(False) # Process input experiment number and scan list try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as e: self._logError(str(e)) return False # Clear image canvas = self.ui.graphicsView_mergeRun canvas.clearAllLines() canvas.clearCanvas() # Plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) for scanno in scanlist: label = "Exp %s Scan %s"%(str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False) # ENDFOR return def doMergeScanView2D(self): """ Change the merged run's view to 2D plot """ # Highlight button color and change the color of another one self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: white; color: gray;}') self.ui.pushButton_view2D.setEnabled(False) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: yellow; color: red;}') self.ui.pushButton_viewMScan1D.setEnabled(True) # Get list of data to plot r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] # Convert the workspaces to 2D vector vecylist = [] yticklabels = [] xmin = None xmax = None for scanno in scanlist: # put y values to list for constructing 2D array vecx, vecy = self._myControl.getVectorToPlot(expno, scanno) vecylist.append(vecy) yticklabels.append('Exp %d Scan %d' % (expno, scanno)) #print "[DB] Scan ", scanno, ": X range: ", vecx[0], vecx[-1], " Size X = ", len(vecx) # set up range of x if xmin is None: xmin = vecx[0] xmax = vecx[-1] # ENDIF # ENDFOR dim2array = numpy.array(vecylist) #print "2D vector: \n", dim2array #print "x range: %f, %f" % (xmin, xmax) #print "y labels: ", yticklabels # Plot holdprev=False self.ui.graphicsView_mergeRun.clearAllLines() self.ui.graphicsView_mergeRun.addPlot2D(dim2array, xmin=xmin, xmax=xmax, ymin=0, ymax=len(vecylist), holdprev=holdprev, yticklabels=yticklabels) return def doMergeScanViewMerged(self): """ Change the merged run's view to 1D plot """ # Highlight the button's color self.ui.pushButton_view2D.setStyleSheet('QPushButton {color: red;}') self.ui.pushButton_view2D.setEnabled(True) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {color: red;}') self.ui.pushButton_viewMScan1D.setEnabled(True) # Clear image self.ui.graphicsView_mergeRun.clearCanvas() # Plot self._plotMergedReducedData(mkey=self._lastMergeIndex, label=self._lastMergeLabel) return def doPlotIndvDetMain(self): """ Plot individual detector """ # Get exp and scan numbers and check whether the data has been loaded try: expno = self._getInteger(self.ui.lineEdit_expNo) scanno = self._getInteger(self.ui.lineEdit_scanNo) except EmptyError as e: self._logError(str(e)) return # Get detector ID and x-label option try: status, detidlist = self._getIntArray(self.ui.lineEdit_detID.text()) if status is False: errmsg = detidlist print("Unable to parse detector IDs due to %s."%(errmsg)) return else: print("[DB] Detectors to plot: %s"%(detidlist)) except EmptyError: self._logError("Detector ID must 
be specified for plotting individual detector.") return # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() xlabel = str(self.ui.comboBox_indvDetXLabel.currentText()).strip() if xlabel != "" and xlabel != "Pt." and xlabel != "2theta/Scattering Angle": # Plot with sample logs other than Pt. self._logNotice("New Feature: X-label %s is supported for plotting individual detector's counts. " " Set to detector angle." % xlabel) xlabel = xlabel else: # Plot with Pt. or detector angles if xlabel != "Pt.": xlabel = "" self._logNotice("X-label for individual detectror is '%s'." % (xlabel)) # plot for detid in sorted(detidlist): try: self._plot_individual_detector_counts(expno, scanno, detid, xlabel, resetboundary=not overplot) self._expNo = expno self._scanNo = scanno self._detID = detid self._indvXLabel = xlabel except NotImplementedError as e: self._logError(str(e)) return def doPlotIndvDetNext(self): """ Plot next raw detector signals for tab 'Individual Detector' """ # Plot try: currdetid = self._detID + 1 # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid, self._indvXLabel) except KeyError as e: self._logError(str(e)) else: self._detID = currdetid # Update widget self.ui.lineEdit_detID.setText(str(self._detID)) return def doPlotIndvDetPrev(self): """ Plot previous individual detector's signal for tab 'Individual Detector' """ # Plot try: currdetid = self._detID - 1 # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid, self._indvXLabel) except KeyError as e: self._logError(str(e)) else: self._detID = currdetid # Update widget self.ui.lineEdit_detID.setText(str(self._detID)) return def do_convert_plot_multi_scans(self): """ Convert individual plots from normalized to raw or vice verse """ # Identify the mode if str(self.ui.pushButton_plotRawMultiScans.text()) == 'Plot Raw': new_mode = 'Plot Raw' else: new_mode = 'Plot Normalized' # Get information try: min_x = self._getFloat(self.ui.lineEdit_mergeMinX) except EmptyError: min_x = None try: max_x = self._getFloat(self.ui.lineEdit_mergeMaxX) except EmptyError: max_x = None bin_size = self._getFloat(self.ui.lineEdit_mergeBinSize) # Process input experiment number and scan list try: r = self._uiGetExpScanTabMultiScans() exp_no = r[0] scan_list = r[1] except NotImplementedError as e: self._logError(str(e)) return False # Re-process the data if new_mode == 'Plot Raw': if self._multiScanList is None or self._multiScanExp is None: raise NotImplementedError('Experiment and scan list are not set up for plot raw.') self._myControl.scale_to_raw_monitor_counts(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size) else: self._myControl.reset_to_normalized(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size) # Clear image canvas = self.ui.graphicsView_mergeRun canvas.clearAllLines() canvas.clearCanvas() canvas.resetLineColorStyle() # Plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) for scan_no in scan_list: label = "Exp %s Scan %s"%(str(exp_no), str(scan_no)) self._plotReducedData(exp_no, scan_no, canvas, xlabel, label=label, clearcanvas=False) # END_FOR # Change the button name if new_mode == 
'Plot Raw': self.ui.pushButton_plotRawMultiScans.setText('Plot Normalized') else: self.ui.pushButton_plotRawMultiScans.setText('Plot Raw') return def doPlotRawPtMain(self): """ Plot current raw detector signal for a specific Pt. """ # Get experiment number and scan number for data file try: expno = self._getInteger(self.ui.lineEdit_expNo) scanno = self._getInteger(self.ui.lineEdit_scanNo) except EmptyError as e: self._logError(str(e)) return # plot options doOverPlot = bool(self.ui.checkBox_overpltRawDet.isChecked()) plotmode = str(self.ui.comboBox_rawDetMode.currentText()) try: ptNo = self._getInteger(self.ui.lineEdit_ptNo) except EmptyError: ptNo = None # plot print("[DB] Plot Raw Detector: PlotMode = %s." % (plotmode)) execstatus = self._plotRawDetSignal(expno, scanno, plotmode, ptNo, doOverPlot) # set global values if good if execstatus is True: self._rawDetPtNo = ptNo self._rawDetExpNo = expno self._rawDetScanNo = scanno self._rawDetPlotMode = plotmode else: print("[Error] Execution fails with signal %s. " % (str(execstatus))) return def doPlotRawPtNext(self): """ Plot next raw detector signals """ # Check if self._rawDetPtNo is not None: ptno = self._rawDetPtNo + 1 else: self._logError("Unable to plot previous raw detector \ because Pt. or Detector ID has not been set up yet.") return # EndIfElse # Get plot mode and plot plotmode = str(self.ui.comboBox_rawDetMode.currentText()) overplot = bool(self.ui.checkBox_overpltRawDet.isChecked()) execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode, ptno, overplot) # update if it is good to plot if execstatus is True: self._rawDetPtNo = ptno self.ui.lineEdit_ptNo.setText(str(ptno)) return def do_enable_excluded_dets(self): """ Enable or disable the line editor for excluded detectors :return: """ if self.ui.checkBox_useDetExcludeFile.isChecked() is True: self.ui.lineEdit_detExcluded.setEnabled(True) else: self.ui.lineEdit_detExcluded.setDisabled(True) return def do_plot_raw_pt_prev(self): """ Plot previous raw detector """ # Validate input if self._rawDetPtNo is not None: ptno = self._rawDetPtNo - 1 else: self._logError("Unable to plot previous raw detector \ because Pt. or Detector ID has not been set up yet.") return # get plot mode and do plt plotmode = str(self.ui.comboBox_rawDetMode.currentText()) overplot = bool(self.ui.checkBox_overpltRawDet.isChecked()) execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode, ptno, overplot) # update if it is good to plot if execstatus is True: self._rawDetPtNo = ptno self.ui.lineEdit_ptNo.setText(str(ptno)) return def do_plot_sample_log(self): """ Plot sample log vs. Pt. 
in tab 'Individual Detector' """ expNo = int(self.ui.lineEdit_expNo.text()) scanno = int(self.ui.lineEdit_scanNo.text()) logname = str(self.ui.comboBox_indvDetYLabel.currentText()) self._plotSampleLog(expNo, scanno, logname) return def doReduce2Theta(self): """ Rebin the data and plot in 2theta for tab 'Normalized' """ unit = '2theta' self._uiReducePlotNoramlized(unit) return def doReduceDSpacing(self): """ Rebin the data and plot in d-spacing for tab 'Normalized' """ # new unit and information unit = "dSpacing" self._uiReducePlotNoramlized(unit) return def doReduceQ(self): """ Rebin the data and plot in momentum transfer Q for tab 'Normalized' """ unit = 'Momentum Transfer (Q)' self._uiReducePlotNoramlized(unit) return def doReduceSetData(self): """ Reduce multiple data """ # Get exp number and list of scans try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as e: self._logError(str(e)) return False else: self._multiScanExp = expno self._multiScanList = scanlist # Reduce and plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) canvas = self.ui.graphicsView_mergeRun # canvas.clearAllLines() NO NEED canvas.clearCanvas() canvas.resetLineColorStyle() for scan in scanlist: r = self._uiReduceData(3, unit, expno, scan) good = r[0] expno = r[1] scanno = r[2] if good is True: label = "Exp %s Scan %s"%(str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False) else: self._logError('Failed to reduce Exp %s Scan %s'%(str(expno), str(scanno))) # ENDIF # ENDFOR return def doReduceVanadium2Theta(self): """ Rebin MDEventWorkspaces in 2-theta. for pushButton_rebinD in vanadium peak strip tab Suggested workflow 1. Rebin data 2. Calculate vanadium peaks in 2theta 3. 
""" # Reduce data unit = '2theta' itab = 4 r = self._uiReduceData(itab, unit) good = r[0] expno = r[1] scanno = r[2] # Plot reduced data and vanadium peaks if good is True: canvas = self.ui.graphicsView_vanPeaks xlabel = self._getXLabelFromUnit(unit) label = "Exp %s Scan %s"%(str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=True) # plot vanadium peaks vanpeakpos = self._myControl.getVanadiumPeaksPos(expno, scanno) self.ui.lineEdit_stripVPeaks.setText(str(vanpeakpos)) self._plotPeakIndicators(self.ui.graphicsView_vanPeaks, vanpeakpos) return good def doSaveData(self): """ Save data """ # get exp number and scan number try: # exp and scan expno, scanno = self._uiGetExpScanNumber() # file type filetype = str(self.ui.comboBox_outputFormat.currentText()) # file name savedatadir = str(self.ui.lineEdit_outputFileName.text()).strip() if savedatadir is not None and os.path.exists(savedatadir) is True: homedir = savedatadir else: homedir = os.getcwd() # launch a dialog to get data filefilter = "All files (*);;Fullprof (*.dat);;GSAS (*.gsa)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File', homedir, filefilter)) except NotImplementedError as e: self._logError(str(e)) else: self._myControl.savePDFile(expno, scanno, filetype, sfilename) return def doSaveMergedScan(self): """ Save merged scan """ homedir = os.getcwd() filefilter = "Fullprof (*.dat)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter)) self._myControl.saveMergedScan(sfilename, mergeindex=self._lastMergeIndex) return def doSaveMultipleScans(self): """ Save multiple scans """ # Get experiment number and scans r = self._uiGetExpScanTabMultiScans() expno = r[0] scanslist = r[1] # Get base file name homedir = os.getcwd() savedir = str(QtGui.QFileDialog.getExistingDirectory(self,'Get Directory To Save Fullprof',homedir)) for scanno in scanslist: sfilename = os.path.join(savedir, "HB2A_Exp%d_Scan%d_FP.dat"%(expno, scanno)) self._myControl.savePDFile(expno, scanno, 'fullprof', sfilename) # ENDFOR return def doSaveVanRun(self): """ Save the vanadium run with peaks removed """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False homedir = os.getcwd() filefilter = "Fullprof (*.dat)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter)) self._myControl.saveProcessedVanadium(expno, scanno, sfilename) return def doSmoothVanadiumData(self): """ Smooth vanadium spectrum """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." 
% ( str(e))) return False smoothparams_str = str(self.ui.lineEdit_smoothParams.text()) # Smooth data status = self._myControl.smoothVanadiumSpectrum(expno, scanno, smoothparams_str) if status is False: self._logError("Failed to smooth vanadium data") # Plot unit = '2theta' xlabel = self._getXLabelFromUnit(unit) label = "Vanadium Exp %d Scan %d FFT-Smooth by %s" % (expno, scanno, smoothparams_str) self._plotVanadiumRun(expno, scanno, xlabel, label, False, True) return def doSmoothVanadiumApply(self): """ Apply smoothing effect to vanadium data """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False self._myControl.applySmoothVanadium(expno, scanno, True) return def doSmoothVanadiumUndo(self): """ Undo smoothing vanadium """ try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False self._myControl.applySmoothVanadium(expno, scanno, False) return def doStripVandiumPeaks(self): """ Strip vanadium peaks """ # Get exp number an scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Error to get Exp and Scan due to %s." % (str(e))) return False # Default unit unit = '2theta' # Get and build binning parameter xmin, binsize, xmax = self._uiGetBinningParams(itab=4) if xmin is None: binparams = '%f'%(binsize) else: binparams = '%f,%f,%f'%(xmin, binsize, xmax) # Strip vanadium peak good = self._myControl.stripVanadiumPeaks(expno, scanno, binparams, vanpeakposlist=None) # Plot if good is True: xlabel = self._getXLabelFromUnit(unit) label="Exp %d Scan %d Bin = %.5f Vanadium Stripped" % (expno, scanno, binsize) self._plotVanadiumRun(expno, scanno, xlabel, label, False) return def doUpdateWavelength(self): """ Update the wavelength to line edit """ index = self.ui.comboBox_wavelength.currentIndex() print("Update wavelength to ", index) if index == 0: wavelength = 2.41 elif index == 1: wavelength = 1.54 elif index == 2: wavelength = 1.12 else: wavelength = None self.ui.lineEdit_wavelength.setText(str(wavelength)) return def on_mouseDownEvent(self, event): """ Respond to pick up a value with mouse down event Definition of button_press_event is: button_press_event(x, y, button, dblclick=False, guiEvent=None) Thus event has x, y and button. event.button has 3 values: 1: left 2: middle 3: right """ # FUTURE: Need to make this work x = event.xdata y = event.ydata button = event.button if x is not None and y is not None: # mouse is clicked within graph if button == 1: msg = "Mouse 1: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button) print(msg) elif button == 2: msg = "Mouse 2: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button) QtGui.QMessageBox.information(self, "Click!", msg) elif button == 3: # right click of mouse will pop up a context-menu # menu should be self.ui.menu? 
menu = QtGui.QMenu(self) addAction = QtGui.QAction('Add', self) addAction.triggered.connect(self.addSomething) menu.addAction(addAction) rmAction = QtGui.QAction('Remove', self) rmAction.triggered.connect(self.rmSomething) menu.addAction(rmAction) # add other required actions menu.popup(QtGui.QCursor.pos()) # ENDIF return def on_mouseMotion(self, event): """ Event handler for mouse being detected to move """ # prev_x = self._viewMerge_X # prev_y = self._viewMerge_Y curx = event.xdata cury = event.ydata if curx is None or cury is None: return self._viewMerge_X = event.xdata self._viewMerge_Y = event.ydata #if prey is None or int(prey) != int(self._viewMerge_Y): # print "Mouse is moving to ", event.xdata, event.ydata return def addSomething(self): """ """ # FUTURE - Need to implement how to deal with this print("Add scan back to merge") return def rmSomething(self): """ """ # FUTURE - Need to implement how to deal with this print("Remove a scan from merged data.") return #-------------------------------------------------------------------------- # Private methods to plot data #-------------------------------------------------------------------------- def _plotIndividualDetCountsVsSampleLog(self, expno, scanno, detid, samplename, raw=True): """ Plot one specific detector's counts vs. one specified sample log's value along with all Pts. For example: detector 11's counts vs. sample_b's value :param expno: :param scanno: :param detid: :param samplename: :param raw: boolean whether the output is normalized by monitor counts :return: """ # Validate input try: expno = int(expno) scanno = int(scanno) detid = int(detid) samplename = str(samplename) except ValueError: raise NotImplementedError("ExpNo, ScanNo or DetID is not integer.") # Get the array for detector counts vs. sample log value by mapping Pt. vecx, vecy = self._myControl.getIndividualDetCountsVsSample(expno, scanno, detid, samplename, raw) # Clear canvas self.ui.graphicsView_indvDet.clearCanvas() # Plot marker, color = self.ui.graphicsView_indvDet.getNextLineMarkerColorCombo() self.ui.graphicsView_indvDet.add_plot1d(vec_x=vecx, vec_y=vecy, marker=marker, color=color, x_label=samplename, y_label='Counts', label='DetID = %d'%(detid)) # FUTURE: In future, need to find out how to use self._graphIndDevMode # self._graphIndDevMode = (samplename, 'Counts') return def _plot_individual_detector_counts(self, expno, scanno, detid, xaxis, resetboundary=False): """ Plot a specific detector's counts along all experiment points (pt) :param expno: :param scanno: :param detid: :param xaxis: :param resetboundary: :return: """ # Validate input expno = int(expno) scanno = int(scanno) detid = int(detid) plot_error_bar = self.ui.checkBox_indDetErrorBar.isChecked() plot_normal = self.ui.checkBox_indDetNormByMon.isChecked() # Reject if data is not loaded if self._myControl.hasDataLoaded(expno, scanno) is False: self._logError("Data file for Exp %d Scan %d has not been loaded." 
% (expno, scanno)) return False # Canvas and line information canvas = self.ui.graphicsView_indvDet if (canvas in self._tabLineDict) is False: self._tabLineDict[canvas] = [] # get data self._logNotice("Input x-axis is '%s' for plotting individual detector's counts."%(xaxis)) if len(xaxis) == 0: xaxis = None vecx, vecy = self._myControl.getIndividualDetCounts(expno, scanno, detid, xaxis, plot_normal) if isinstance(vecx, numpy.ndarray) is False: raise NotImplementedError('vecx, vecy must be numpy arrays.') if plot_error_bar is True: y_err = numpy.sqrt(vecy) else: y_err = None # Plot to canvas marker, color = canvas.getNextLineMarkerColorCombo() if xaxis == "" or xaxis == "2theta/Scattering Angle": xlabel = r'$2\theta$' else: #xlabel = "Pt." xlabel = xaxis # FUTURE - If it works with any way of plotting, then refactor Pt. with any other sample names label = "Detector ID: %d" % (detid) if self._tabLineDict[canvas].count((expno, scanno, detid)) == 0: canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='Counts', label=label, y_err=y_err) self._tabLineDict[canvas].append((expno, scanno, detid)) if resetboundary is True: # Set xmin and xmax about the data for first time xmin = min(vecx) xmax = max(vecx) ymin = min(vecy) ymax = max(vecy) resetboundary = False else: # auto setup for image boundary xmin = min(min(vecx), canvas.getXLimit()[0]) xmax = max(max(vecx), canvas.getXLimit()[1]) ymin = min(min(vecy), canvas.getYLimit()[0]) ymax = max(max(vecy), canvas.getYLimit()[1]) # ENDIFELSE dx = xmax-xmin dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.0001, xmax+dx*0.0001, ymin-dy*0.0001, ymax+dy*0.0001) # Set canvas mode # FUTURE: Consider how to use self._graphIndDevMode in future # self._graphIndDevMode = (xlabel, 'Counts') return True def _plotPeakIndicators(self, canvas, peakposlist): """ Plot indicators for peaks """ print("[DB] Peak indicators are at ", peakposlist) rangey = canvas.getYLimit() rangex = canvas.getXLimit() for pos in peakposlist: if pos >= rangex[0] and pos <= rangex[1]: vecx = numpy.array([pos, pos]) vecy = numpy.array([rangey[0], rangey[1]]) canvas.add_plot1d(vecx, vecy, color='black', line_style='--') # ENDFOR return def _plotRawDetSignal(self, expno, scanno, plotmode, ptno, dooverplot): """ Plot the counts of all detectors of a certain Pt. in an experiment """ # Validate input expno = int(expno) scanno = int(scanno) # Set up canvas and dictionary canvas = self.ui.graphicsView_Raw if (canvas in self._tabLineDict) is False: self._tabLineDict[canvas] = [] # Check whether data exists if self._myControl.hasDataLoaded(expno, scanno) is False: self._logError("File has not been loaded for Exp %d Scan %d. Load data first!" % (expno, scanno)) return # Get vecx and vecy if plotmode == "All Pts.": # Plot all Pts. vecxylist = self._myControl.getRawDetectorCounts(expno, scanno) # Clear previous self.ui.graphicsView_Raw.clearAllLines() self.ui.graphicsView_Raw.setLineMarkerColorIndex(0) self._tabLineDict[canvas] = [] elif plotmode == "Single Pts.": # Plot plot ptno = int(ptno) if dooverplot is False: self.ui.graphicsView_Raw.clearAllLines() self.ui.graphicsView_Raw.setLineMarkerColorIndex(0) self._tabLineDict[canvas] = [] # Plot one pts. vecxylist = self._myControl.getRawDetectorCounts(expno, scanno, [ptno]) else: # Raise exception raise NotImplementedError("Plot mode %s is not supported." 
% (plotmode)) # Set up unit/x-label unit = r"$2\theta$" # plot xmin = None xmax = None ymin = None ymax = None for ptno, vecx, vecy in vecxylist: # FUTURE: Label is left blank as there can be too many labels label = 'Pt %d' % (ptno) # skip if this plot has existed if self._tabLineDict[canvas].count( (expno, scanno, ptno) ) == 1: continue marker, color = canvas.getNextLineMarkerColorCombo() canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=unit, y_label='intensity',label=label) # set up line tuple self._tabLineDict[canvas].append( (expno, scanno, ptno) ) # auto setup for image boundary xmin = min(min(vecx), canvas.getXLimit()[0]) xmax = max(max(vecx), canvas.getXLimit()[1]) ymin = min(min(vecy), canvas.getYLimit()[0]) ymax = max(max(vecy), canvas.getYLimit()[1]) # ENDFOR # Reset canvas x-y limit if xmin is not None: dx = xmax-xmin dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.0001, xmax+dx*0.0001, ymin-dy*0.0001, ymax+dy*0.0001) return True def _plotMergedReducedData(self, mkey, label): """ Plot the reduced data from merged ... """ # get the data try: vecx, vecy = self._myControl.getMergedVector(mkey) except KeyError as e: self._logError("Unable to retrieve merged reduced data due to %s." % (str(e))) return canvas = self.ui.graphicsView_mergeRun # Clear canvas canvas.clearAllLines() canvas.clearCanvas() # Plot marker, color = canvas.getNextLineMarkerColorCombo() xlabel = self._getXLabelFromUnit(self.ui.comboBox_mscanUnit.currentText()) canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='intensity',label=label) xmax = max(vecx) xmin = min(vecx) dx = xmax-xmin ymax = max(vecy) ymin = min(vecy) dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.1, xmax+dx*0.1, ymin-dy*0.1, ymax+dy*0.1) return def _plotReducedData(self, exp, scan, canvas, xlabel, label=None, clearcanvas=True, spectrum=0, plot_error=False): """ Plot reduced data for exp and scan """ if spectrum != 0: raise NotImplementedError("Unable to support spectrum = %d case."%(spectrum)) # whether the data is load if self._myControl.hasReducedWS(exp, scan) is False: self._logWarning("No data to plot!") return # get to know whether it is required to clear the image if clearcanvas is True: canvas.clearAllLines() canvas.setLineMarkerColorIndex(0) # plot vec_x, vec_y = self._myControl.getVectorToPlot(exp, scan) if isinstance(vec_x, numpy.ndarray) is False: vec_x = numpy.array(vec_x) vec_y = numpy.array(vec_y) # FUTURE - Should check y_err set up correctly in Mantid or not if plot_error is True: raise RuntimeError('Implement how to return y_err ASAP.') else: y_err = None # get the marker color for the line marker, color = canvas.getNextLineMarkerColorCombo() # plot if label is None: label = "Exp %d Scan %d" % (exp, scan) canvas.add_plot1d(vec_x, vec_y, marker=marker, color=color, x_label=xlabel, y_label='intensity',label=label, y_err=y_err) if clearcanvas is True: xmax = max(vec_x) xmin = min(vec_x) dx = xmax-xmin ymax = max(vec_y) ymin = min(vec_y) dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.1, xmax+dx*0.1, ymin-dy*0.1, ymax+dy*0.1) return def _plotSampleLog(self, expno, scanno, samplelogname): """ Plot the value of a sample log among all Pt. """ # Validate input expno = int(expno) scanno = int(scanno) samplelogname = str(samplelogname) # Reject if data is not loaded if self._myControl.hasDataLoaded(expno, scanno) is False: self._logError("Data file for Exp %d Scan %d has not been loaded." 
% (expno, scanno)) return False # Canvas and line information self._indvDetCanvasMode = 'samplelog' # pop out the xlabel list # REFACTOR - Only need to set up once if previous plot has the same setup if self.ui.comboBox_indvDetXLabel.count() == 0: floatsamplelognamelist = self._myControl.getSampleLogNames(expno, scanno) self.ui.comboBox_indvDetXLabel.clear() self.ui.comboBox_indvDetXLabel.addItems(floatsamplelognamelist) raise RuntimeError("This X-label combo box should be set up during loading data before.") xlabel=str(self.ui.comboBox_indvDetXLabel.currentText()) # get data vecx, vecy = self._myControl.getSampleLogValue(expno, scanno, samplelogname, xlabel) # Plot to canvas canvas = self.ui.graphicsView_indvDet # FUTURE - Clear canvas (think of a case that no need to clear canvas) canvas.clearCanvas() # canvas.clearAllLines() marker, color = canvas.getNextLineMarkerColorCombo() if xlabel is None: xlabel = r'Pt' label = samplelogname canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='Counts',label=label) # auto setup for image boundary xmin = min(vecx) xmax = max(vecx) ymin = min(vecy) ymax = max(vecy) dx = xmax-xmin dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.0001, xmax+dx*0.0001, ymin-dy*0.0001, ymax+dy*0.0001) return True def _plotVanadiumRun(self, exp, scan, xlabel, label, clearcanvas=False, TempData=False): """ Plot processed vanadium data Arguments: - TempData :: flag whether the vanadium run is a temporary data set """ # Check whether the data is load exp = int(exp) scan = int(scan) if self._myControl.hasReducedWS(exp, scan) is False: self._logWarning("No data to plot!") return # Get data to plot try: vecx, vecy = self._myControl.getVectorProcessVanToPlot(exp, scan, TempData) if TempData is False: vecx, vecyOrig = self._myControl.getVectorToPlot(exp, scan) diffY = vecyOrig - vecy except NotImplementedError as e: errmsg = '[Error] Unable to retrieve processed vanadium spectrum for exp %d scan %d. 
' \ 'Reason: %s' % (exp, scan, str(e)) QtGui.QMessageBox.information(self, "Return!", errmsg) return # Get to know whether it is required to clear the image canvas = self.ui.graphicsView_vanPeaks if TempData is True: clearcanvas = False if clearcanvas is True: canvas.clearAllLines() canvas.setLineMarkerColorIndex(0) # get the marker color for the line if TempData is True: marker = None color = 'blue' else: marker, color = canvas.getNextLineMarkerColorCombo() # plot canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='intensity',label=label) if TempData is False: canvas.add_plot1d(vecx, diffY, marker='+', color='green', x_label=xlabel, y_label='intensity',label='Diff') # reset canvas limits if clearcanvas is True: xmax = max(vecx) xmin = min(vecx) dx = xmax-xmin ymax = max(vecy) ymin = min(diffY) dy = ymax-ymin canvas.setXYLimit(xmin-dx*0.1, xmax+dx*0.1, ymin-dy*0.1, ymax+dy*0.1) # ENDIF return def _uiDownloadDataFile(self, exp, scan): """ Download data file according to its exp and scan Either download the data from a server or copy the data file from local disk """ # Get on hold of raw data file useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() if useserver == uselocal: self._logError("It is logically wrong to set up server/local dir for data.") useserver = True uselocal = False self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) # ENDIF rvalue = False if self._srcFromServer is True: # Use server: build the URl to download data if self._serverAddress.endswith('/') is False: self._serverAddress += '/' fullurl = "%s%s/exp%d/Datafiles/%s_exp%04d_scan%04d.dat" % (self._serverAddress, self._instrument.lower(), exp, self._instrument.upper(), exp, scan) print("URL: ", fullurl) cachedir = str(self.ui.lineEdit_cache.text()).strip() if os.path.exists(cachedir) is False: invalidcache = cachedir cachedir = os.getcwd() self.ui.lineEdit_cache.setText(cachedir) self._logWarning("Cache directory %s is not valid. " "Using current workspace directory %s as cache." % (invalidcache, cachedir) ) filename = '%s_exp%04d_scan%04d.dat' % (self._instrument.upper(), exp, scan) srcFileName = os.path.join(cachedir, filename) status, errmsg = urllib.downloadFile(fullurl, srcFileName) if status is False: self._logError(errmsg) srcFileName = None else: rvalue = True elif self._srcAtLocal is True: # Data from local srcFileName = os.path.join(self._localSrcDataDir, "%s/Exp%d_Scan%04d.dat" % (self._instrument, exp, scan)) if os.path.exists(srcFileName) is True: rvalue = True else: raise NotImplementedError("Logic error. 
Neither downloaded from server.\ Nor from local drive") return (rvalue,srcFileName) def _uiGetBinningParams(self, itab): """ Get binning parameters Return: - xmin, binsize, xmax """ # Get value if itab == 2: xmin = str(self.ui.lineEdit_xmin.text()) xmax = str(self.ui.lineEdit_xmax.text()) binsize = str(self.ui.lineEdit_binsize.text()) elif itab == 3: xmin = str(self.ui.lineEdit_mergeMinX.text()) xmax = str(self.ui.lineEdit_mergeMaxX.text()) binsize = str(self.ui.lineEdit_mergeBinSize.text()) elif itab == 4: xmin = str(self.ui.lineEdit_min2Theta.text()) xmax = str(self.ui.lineEdit_max2Theta.text()) binsize = str(self.ui.lineEdit_binsize2Theta.text()) else: raise NotImplementedError("Binning parameters are not used for %d-th tab."%(itab)) # Parse values try: xmin = float(xmin) xmax = float(xmax) except ValueError: xmin = None xmax = None else: if xmin >= xmax: raise NotImplementedError("set minimum X = %.5f is larger than \ maximum X = %.5f" % (xmin, xmax)) try: binsize = float(binsize) except ValueError: raise NotImplementedError("Error: bins size '%s' is not a float number." % (binsize)) # Fix for merging as xmin and xmax must be same for all scans if itab == 3 and xmin is None: xmin = 5. xmax = 150. return (xmin, binsize, xmax) def _uiGetExcludedDetectors(self): """ Get excluded detectors from input line edit Return :: list of detector IDs to exclude from reduction """ excludedetidlist = [] if self.ui.checkBox_useDetExcludeFile.isChecked(): detids_str = str(self.ui.lineEdit_detExcluded.text()).strip() status, excludedetidlist = self._getIntArray(detids_str) if status is False: self._logError("Extra scans are not a list of integers: %s." % ( str(self.ui.lineEdit_extraScans.text()))) excludedetidlist = [] # ENDIF # ENDIF return excludedetidlist def _uiGetExpScanNumber(self): """ Get experiment number and scan number from widgets for merged """ expnostr = self.ui.lineEdit_expNo.text() scannostr = self.ui.lineEdit_scanNo.text() try: expno = int(expnostr) scanno = int(scannostr) except ValueError: raise NotImplementedError("Either Exp No '%s' or Scan No '%s \ is not set up right as integer." 
% (expnostr, scannostr)) return (expno, scanno) def _uiGetExpScanTabMultiScans(self): """ Get exp number and scans from tab 3 """ try: expno = int(self.ui.lineEdit_expNo.text()) startscan = int(self.ui.lineEdit_scanStart.text()) endscan = int(self.ui.lineEdit_scanEnd.text()) except ValueError as e: raise RuntimeError("For merging scans, Exp No, Starting scan number and \ end scan number must be given: %s" % (str(e))) # scans = [startscan, endscan] + [others] - [excluded] status, extrascanlist = self._getIntArray(str(self.ui.lineEdit_extraScans.text())) if status is False: raise RuntimeError(extrascanlist) status, excludedlist = self._getIntArray(str(self.ui.lineEdit_exclScans.text())) self._logDebug("Excluded list: %s" %(str(excludedlist))) if status is False: self._logError(excludedlist) return scanslist = list(range(startscan, endscan+1)) scanslist.extend(extrascanlist) scanslist = list(set(scanslist)) for scan in excludedlist: scanslist.remove(scan) return (expno, sorted(scanslist)) def _uiIsBinParamsChange(self, itab, binparams): """ Check whether current bin parameters are same as given value """ xmin,binsize,xmax = self._uiGetBinningParams(itab) newbinparams = [xmin, binsize, xmax] # check binning same = True for i in range(3): par_0 = binparams[i] par_1 = newbinparams[i] try: if abs(float(par_0)-float(par_1)) > 1.0E-6: same = False except TypeError: if par_0 is not None or par_1 is not None: same = False if same is False: break # ENDFOR change = not same if change is True: print("[D...............B]", end=' ') print("%s vs %s " % (str(xmin), str(self._tabBinParamDict[itab][0])), end=' ') print("%s vs %s " % (str(xmax), str(self._tabBinParamDict[itab][2])), end=' ') print("%s vs %s " % (str(binsize), str(self._tabBinParamDict[itab][1]))) else: print("[DB] Rebin = False") return change def _uiReduceData(self, itab, unit, expno=None, scanno=None): """ Rebin and plot by reading GUI widgets' value Arguments: - itab : index of the tab. 
Only 2m 3 and 4 are allowed - unit : string for target unit """ # Experiment number and Scan number if isinstance(expno, int) and isinstance(scanno, int): # Call from tab-3 multiple scan pass else: try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError(str(e)) return # ENDIF # Get binning parameter xmin, binsize, xmax = self._uiGetBinningParams(itab) # Get wavelength try: if itab == 3: wavelength = float(self._myControl.getWavelength(expno, scanno)) else: wavelength = float(str(self.ui.lineEdit_wavelength.text())) except TypeError: if unit != '2theta': raise NotImplementedError('Wavelength must be specified for unit %s.'%(unit)) # Get scale factor try: scalefactor = self._getFloat(self.ui.lineEdit_normalizeMonitor) except EmptyError: scalefactor = None except ValueError as valueerror: raise ValueError("Unable to get normalization factor due to %s."%(str(valueerror))) # Rebin try: # rebinned = self._myControl.rebin(expno, scanno, unit, wavelength, xmin, binsize, xmax) excludeddetlist = self._uiGetExcludedDetectors() self._myControl.reduceSpicePDData(expno, scanno, unit, xmin, xmax, binsize, wavelength, excludeddetlist, scalefactor) # Record binning self._tabBinParamDict[itab] = [xmin, binsize, xmax] except NotImplementedError as e: self._logError(str(e)) return (False, expno, scanno) return (True, expno, scanno) def _uiReducePlotNoramlized(self, unit): """ Support Reduce2Theta, ReduceDspacing and ReduceQ """ itab = 2 canvas = self.ui.graphicsView_reducedData expno, scanno = self._uiGetExpScanNumber() change = self._uiIsBinParamsChange(itab, self._tabBinParamDict[itab]) # check whether line record if unit == self._currUnit and \ self._tabLineDict[itab].count((expno, scanno)) > 0 and change is False: # there is no need to plot again as line exists return # reduce r = self._uiReduceData(2, unit) good = r[0] expno = r[1] scanno = r[2] # failed to reduce if good is False: self._logError("Failed to reduce Exp %d Scan %d" % (expno, scanno)) return # clear canvas??? if unit != self._currUnit: clearcanvas = True elif self.ui.checkBox_clearPrevious.isChecked() is False: # NOTE: naming of the widget is VERY confusing. Should be changed to keepPrevious clearcanvas = True else: clearcanvas = False # reset record dictionary if unit is different from present if clearcanvas is True: self._tabLineDict[itab] = [] self._currUnit = unit self._tabLineDict[itab].append((expno, scanno)) xlabel = self._getXLabelFromUnit(unit) label = "Exp %s Scan %s"%(str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=clearcanvas) return def _logDebug(self, dbinfo): """ Log debug information """ print(dbinfo) def _logError(self, errinfo): """ Log error information """ QtGui.QMessageBox.information(self, "Click!", errinfo) def _logNotice(self, loginfo): """ Log error information """ msg = '[Notice] %s' % loginfo print(msg) # QtGui.QMessageBox.information(self, "Click!", msg) def _logWarning(self, warning_info): """ Log error information """ msg = "[Warning]: %s" % (warning_info) QtGui.QMessageBox.information(self, "OK!", msg) return def _getFloat(self, lineedit): """ Get integer from line edit Exception: ValueError if empty or no input """ valuestr = str(lineedit.text()).strip() if len(valuestr) == 0: raise EmptyError("Input is empty. 
It cannot be converted to float.") try: value = float(valuestr) except ValueError as e: raise e return value def _getInteger(self, lineedit): """ Get integer from line edit """ valuestr = str(lineedit.text()).strip() if len(valuestr) == 0: raise EmptyError("Input is empty. It cannot be converted to integer.") try: value = int(valuestr) except ValueError as e: raise e return value def _getIntArray(self, intliststring): """ Validate whether the string can be divided into integer strings. Allowed: a, b, c-d, e, f Return :: 2-tuple (status, list/error message) """ intliststring = str(intliststring) if intliststring == "": return (True, []) # Split by "," termlevel0s = intliststring.split(",") intlist = [] # For each term errmsg = "" returnstatus = True for level0term in termlevel0s: level0term = level0term.strip() # split upon dash - numdashes = level0term.count("-") if numdashes == 0: # one integer valuestr = level0term try: intvalue = int(valuestr) if str(intvalue) != valuestr: returnstatus = False errmsg = "Contains non-integer string %s." % (valuestr) except ValueError: returnstatus = False errmsg = "String %s is not an integer." % (valuestr) else: intlist.append(intvalue) elif numdashes == 1: # Integer range twoterms = level0term.split("-") templist = [] for i in range(2): valuestr = twoterms[i] try: intvalue = int(valuestr) if str(intvalue) != valuestr: returnstatus = False errmsg = "Contains non-integer string %s." % (valuestr) except ValueError: returnstatus = False errmsg = "String %s is not an integer." % (valuestr) else: templist.append(intvalue) # break loop if returnstatus is False: break # ENDFOR intlist.extend(range(templist[0], templist[1]+1)) else: # Undefined siutation returnstatus = False errmsg = "Term %s contains more than 1 dash." % (level0term) # ENDIFELSE # break loop if something is wrong if returnstatus is False: break # ENDFOR # Return with false if returnstatus is False: return (False, errmsg) return (True, intlist) def _getXLabelFromUnit(self, unit): """ Get X-label from unit """ if unit == '2theta': xlabel = r'$2\theta$ (Degrees)' elif unit == 'dSpacing': xlabel = r"d $(\AA)$" elif unit == 'Momentum Transfer (Q)': xlabel = r"Q $(\AA^{-1})$" else: xlabel = 'Wacky Unknown' return xlabel
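# --- Illustrative sketch (not part of the original GUI class) ---
# The _getIntArray() method above accepts strings such as "1, 3, 5-8" and expands
# them into a list of integers, returning (status, list-or-error-message). A minimal
# standalone sketch of the same parsing contract, handy for exercising the logic
# outside the GUI, could look like the function below; the name parse_int_ranges is
# hypothetical and does not exist in this module, and scan/detector IDs are assumed
# to be non-negative.
def parse_int_ranges(spec):
    """Expand a spec like '1, 3, 5-8' into a sorted list such as [1, 3, 5, 6, 7, 8]."""
    values = set()
    for term in str(spec).split(","):
        term = term.strip()
        if not term:
            continue
        if "-" in term:
            # a range such as "5-8" becomes 5, 6, 7, 8 (inclusive)
            low, high = (int(part) for part in term.split("-", 1))
            values.update(range(low, high + 1))
        else:
            values.add(int(term))
    return sorted(values)

# Example usage of the sketch: parse_int_ranges("1, 3, 5-8") -> [1, 3, 5, 6, 7, 8]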
gpl-3.0
-5,060,320,638,628,770,000
35.077617
139
0.570344
false
hesseltuinhof/mxnet
example/image-classification/common/data.py
2
6769
import mxnet as mx import random from mxnet.io import DataBatch, DataIter import numpy as np def add_data_args(parser): data = parser.add_argument_group('Data', 'the input images') data.add_argument('--data-train', type=str, help='the training data') data.add_argument('--data-val', type=str, help='the validation data') data.add_argument('--rgb-mean', type=str, default='123.68,116.779,103.939', help='a tuple of size 3 for the mean rgb') data.add_argument('--pad-size', type=int, default=0, help='padding the input image') data.add_argument('--image-shape', type=str, help='the image shape feed into the network, e.g. (3,224,224)') data.add_argument('--num-classes', type=int, help='the number of classes') data.add_argument('--num-examples', type=int, help='the number of training examples') data.add_argument('--data-nthreads', type=int, default=4, help='number of threads for data decoding') data.add_argument('--benchmark', type=int, default=0, help='if 1, then feed the network with synthetic data') return data def add_data_aug_args(parser): aug = parser.add_argument_group( 'Image augmentations', 'implemented in src/io/image_aug_default.cc') aug.add_argument('--random-crop', type=int, default=1, help='if or not randomly crop the image') aug.add_argument('--random-mirror', type=int, default=1, help='if or not randomly flip horizontally') aug.add_argument('--max-random-h', type=int, default=0, help='max change of hue, whose range is [0, 180]') aug.add_argument('--max-random-s', type=int, default=0, help='max change of saturation, whose range is [0, 255]') aug.add_argument('--max-random-l', type=int, default=0, help='max change of intensity, whose range is [0, 255]') aug.add_argument('--max-random-aspect-ratio', type=float, default=0, help='max change of aspect ratio, whose range is [0, 1]') aug.add_argument('--max-random-rotate-angle', type=int, default=0, help='max angle to rotate, whose range is [0, 360]') aug.add_argument('--max-random-shear-ratio', type=float, default=0, help='max ratio to shear, whose range is [0, 1]') aug.add_argument('--max-random-scale', type=float, default=1, help='max ratio to scale') aug.add_argument('--min-random-scale', type=float, default=1, help='min ratio to scale, should >= img_size/input_shape. 
otherwise use --pad-size') return aug def set_data_aug_level(aug, level): if level >= 1: aug.set_defaults(random_crop=1, random_mirror=1) if level >= 2: aug.set_defaults(max_random_h=36, max_random_s=50, max_random_l=50) if level >= 3: aug.set_defaults(max_random_rotate_angle=10, max_random_shear_ratio=0.1, max_random_aspect_ratio=0.25) class SyntheticDataIter(DataIter): def __init__(self, num_classes, data_shape, max_iter, dtype): self.batch_size = data_shape[0] self.cur_iter = 0 self.max_iter = max_iter self.dtype = dtype label = np.random.randint(0, num_classes, [self.batch_size,]) data = np.random.uniform(-1, 1, data_shape) self.data = mx.nd.array(data, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0)) self.label = mx.nd.array(label, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0)) def __iter__(self): return self @property def provide_data(self): return [mx.io.DataDesc('data', self.data.shape, self.dtype)] @property def provide_label(self): return [mx.io.DataDesc('softmax_label', (self.batch_size,), self.dtype)] def next(self): self.cur_iter += 1 if self.cur_iter <= self.max_iter: return DataBatch(data=(self.data,), label=(self.label,), pad=0, index=None, provide_data=self.provide_data, provide_label=self.provide_label) else: raise StopIteration def __next__(self): return self.next() def reset(self): self.cur_iter = 0 def get_rec_iter(args, kv=None): image_shape = tuple([int(l) for l in args.image_shape.split(',')]) if 'benchmark' in args and args.benchmark: data_shape = (args.batch_size,) + image_shape train = SyntheticDataIter(args.num_classes, data_shape, 500, np.float32) return (train, None) if kv: (rank, nworker) = (kv.rank, kv.num_workers) else: (rank, nworker) = (0, 1) rgb_mean = [float(i) for i in args.rgb_mean.split(',')] train = mx.io.ImageRecordIter( path_imgrec = args.data_train, label_width = 1, mean_r = rgb_mean[0], mean_g = rgb_mean[1], mean_b = rgb_mean[2], data_name = 'data', label_name = 'softmax_label', data_shape = image_shape, batch_size = args.batch_size, rand_crop = args.random_crop, max_random_scale = args.max_random_scale, pad = args.pad_size, fill_value = 127, min_random_scale = args.min_random_scale, max_aspect_ratio = args.max_random_aspect_ratio, random_h = args.max_random_h, random_s = args.max_random_s, random_l = args.max_random_l, max_rotate_angle = args.max_random_rotate_angle, max_shear_ratio = args.max_random_shear_ratio, rand_mirror = args.random_mirror, preprocess_threads = args.data_nthreads, shuffle = True, num_parts = nworker, part_index = rank) if args.data_val is None: return (train, None) val = mx.io.ImageRecordIter( path_imgrec = args.data_val, label_width = 1, mean_r = rgb_mean[0], mean_g = rgb_mean[1], mean_b = rgb_mean[2], data_name = 'data', label_name = 'softmax_label', batch_size = args.batch_size, data_shape = image_shape, preprocess_threads = args.data_nthreads, rand_crop = False, rand_mirror = False, num_parts = nworker, part_index = rank) return (train, val)
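# --- Illustrative usage sketch (not part of the original module) ---
# The helpers above are normally combined with a companion "fit" module that adds
# further options such as --batch-size; the extra argument added below stands in for
# that and is an assumption. The synthetic --benchmark path is used so that no .rec
# files are required. Note that SyntheticDataIter allocates pinned-CPU NDArrays, so
# this sketch assumes an MXNet build where the 'cpu_pinned' context is available.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='data iterator smoke test (sketch)')
    add_data_args(parser)
    add_data_aug_args(parser)
    set_data_aug_level(parser, 2)
    # normally provided by the fit module's argument group; added here only for the sketch
    parser.add_argument('--batch-size', type=int, default=32)
    args = parser.parse_args(['--benchmark', '1',
                              '--image-shape', '3,224,224',
                              '--num-classes', '10'])
    train, _ = get_rec_iter(args)
    batch = next(iter(train))
    # one synthetic batch: data of shape (32, 3, 224, 224) and labels of shape (32,)
    print(batch.data[0].shape, batch.label[0].shape)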
apache-2.0
-6,172,175,459,604,209,000
45.363014
110
0.554587
false
googlecodelabs/nest-tensorflow
codelab/classify.py
1
3339
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import urllib2
import os.path
import numpy as np
import tensorflow as tf
from node_lookup import NodeLookup
from errors import error_result

snapshot_file = 'tmp/tmp.jpg'
model_dir = 'tmp/imagenet'
num_top_predictions = 5

def classify_remote_image(image_url):
    # Attempt to Download
    try:
        image = download_image(image_url)
    except IOError:
        return error_result("Camera's Snapshot URL could not be downloaded")

    # Attempt to Classify
    try:
        results = run_inference_on_image(image)
    except:
        return error_result("Could not classify the image")

    return {
        "image_url": image_url,
        "results": results
    }

def create_graph():
    with tf.gfile.FastGFile(os.path.join(
        model_dir, 'classify_image_graph_def.pb'
    ), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

def run_inference_on_image(image):
    """Runs inference on an image.

    Args:
        image: Image file name.

    Returns:
        A dict mapping each of the top predicted human-readable labels to its
        softmax score.
    """
    if not tf.gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image)
    image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Creates graph from saved GraphDef.
    create_graph()

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across
        #   1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #   float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #   encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        # Creates node ID --> English string lookup.
        node_lookup = NodeLookup()

        top_k = predictions.argsort()[-num_top_predictions:][::-1]
        results = {}
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            results[human_string] = float(score)

        return results

def download_image(url):
    # Downloads the image from the specified URL to the filesystem
    response = urllib2.urlopen(url)
    body = response.read()

    if body == '':
        raise IOError('The Snapshot URL did not contain any HTTP body when fetched')

    with open(snapshot_file, 'w') as f:
        f.write(body)

    return snapshot_file
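# --- Illustrative usage sketch (not part of the original codelab file) ---
# classify_remote_image() assumes the pretrained Inception graph has already been
# downloaded to tmp/imagenet/classify_image_graph_def.pb (a codelab setup step) and
# that NodeLookup and error_result are importable as above. The URL below is a
# hypothetical placeholder, not a real camera snapshot endpoint.
if __name__ == '__main__':
    example_url = 'https://example.com/camera/snapshot.jpg'  # hypothetical URL
    outcome = classify_remote_image(example_url)
    # On success, outcome['results'] maps each predicted label to its softmax score.
    results = outcome.get('results', {})
    for label, score in sorted(results.items(), key=lambda item: item[1],
                               reverse=True):
        print('%s: %.3f' % (label, score))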
apache-2.0
8,367,322,700,183,847,000
30.5
84
0.649596
false
joergdietrich/astropy
astropy/visualization/tests/test_interval.py
2
4086
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from ...tests.helper import pytest from ...utils import NumpyRNGContext from ..interval import (ManualInterval, MinMaxInterval, PercentileInterval, AsymmetricPercentileInterval, ZScaleInterval) class TestInterval(object): data = np.linspace(-20., 60., 100) def test_manual(self): interval = ManualInterval(-10., +15.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -10.) np.testing.assert_allclose(vmax, +15.) def test_manual_defaults(self): interval = ManualInterval(vmin=-10.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -10.) np.testing.assert_allclose(vmax, np.max(self.data)) interval = ManualInterval(vmax=15.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, np.min(self.data)) np.testing.assert_allclose(vmax, 15.) def test_minmax(self): interval = MinMaxInterval() vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -20.) np.testing.assert_allclose(vmax, +60.) def test_percentile(self): interval = PercentileInterval(62.2) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -4.88) np.testing.assert_allclose(vmax, 44.88) def test_asymmetric_percentile(self): interval = AsymmetricPercentileInterval(10.5, 70.5) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -11.6) np.testing.assert_allclose(vmax, 36.4) def test_asymmetric_percentile_nsamples(self): with NumpyRNGContext(12345): interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -14.367676767676768) np.testing.assert_allclose(vmax, 40.266666666666666) class TestIntervalList(TestInterval): # Make sure intervals work with lists data = np.linspace(-20., 60., 100).tolist() class TestInterval2D(TestInterval): # Make sure intervals work with 2d arrays data = np.linspace(-20., 60., 100).reshape(100, 1) def test_zscale(): np.random.seed(42) data = np.random.randn(100, 100) * 5 + 10 interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, -9.6, atol=0.1) np.testing.assert_allclose(vmax, 25.4, atol=0.1) data = list(range(1000)) + [np.nan] interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, 0, atol=0.1) np.testing.assert_allclose(vmax, 999, atol=0.1) data = list(range(100)) interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, 0, atol=0.1) np.testing.assert_allclose(vmax, 99, atol=0.1) def test_integers(): # Need to make sure integers get cast to float interval = MinMaxInterval() values = interval([1, 3, 4, 5, 6]) np.testing.assert_allclose(values, [0., 0.4, 0.6, 0.8, 1.0]) # Don't accept integer array in output out = np.zeros(5, dtype=int) with pytest.raises(TypeError) as exc: values = interval([1, 3, 4, 5, 6], out=out) assert exc.value.args[0] == ("Can only do in-place scaling for " "floating-point arrays") # But integer input and floating point output is fine out = np.zeros(5, dtype=float) interval([1, 3, 4, 5, 6], out=out) np.testing.assert_allclose(out, [0., 0.4, 0.6, 0.8, 1.0]) def test_constant_data(): """Test intervals with constant data (avoiding divide-by-zero).""" shape = (10, 10) data = np.ones(shape) interval = MinMaxInterval() limits = interval.get_limits(data) values = interval(data) np.testing.assert_allclose(limits, (1., 1.)) np.testing.assert_allclose(values, 
np.zeros(shape))
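# --- Illustrative sketch (not part of the original test module) ---
# The interval classes exercised above share a small API: get_limits() returns the
# (vmin, vmax) pair, and calling the instance rescales (and clips) the data onto
# [0, 1] using those limits. The helper below only demonstrates that pattern with
# names already imported in this module; the leading underscore keeps pytest from
# collecting it as a test.
def _demo_interval_usage():
    demo_data = np.linspace(-20., 60., 100)
    demo_interval = PercentileInterval(62.2)
    vmin, vmax = demo_interval.get_limits(demo_data)  # ~(-4.88, 44.88), as asserted above
    scaled = demo_interval(demo_data)                 # values mapped onto [0, 1]
    return vmin, vmax, scaled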
bsd-3-clause
4,630,598,436,204,791,000
33.336134
77
0.64929
false
liantian-cn/ms17_010_scanner_gui
lib/appJar/appjar.py
1
300568
# -*- coding: utf-8 -*- """appJar.py: Provides a GUI class, for making simple tkinter GUIs.""" # Nearly everything I learnt came from: http://effbot.org/tkinterbook/ # with help from: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/index.html # with snippets from stackexchange.com # make print backwards compatible from __future__ import print_function try: # for Python2 from Tkinter import * import tkMessageBox as MessageBox from tkColorChooser import askcolor import tkFileDialog as filedialog import ScrolledText as scrolledtext import tkFont as font PYTHON2 = True PY_NAME = "Python" except ImportError: # for Python3 from tkinter import * from tkinter import messagebox as MessageBox from tkinter.colorchooser import askcolor from tkinter import filedialog from tkinter import scrolledtext from tkinter import font PYTHON2 = False PY_NAME = "python3" import os import sys import re import hashlib import imghdr import time import __main__ as theMain from platform import system as platform # Links import webbrowser # ajTree try: from idlelib.TreeWidget import TreeItem, TreeNode, ZoomHeight except: try: from idlelib.tree import TreeItem, TreeNode from idlelib.zoomheight import ZoomHeight except: raise Exception("Unsupported python build, unable to access idlelib") from xml.dom.minidom import parseString # DatePicker import calendar import datetime ####################### # import borrowed libraries - not compulsory ####################### try: from appJar.lib.tooltip import ToolTip TOOLTIP_AVAILABLE = True except: TOOLTIP_AVAILABLE = False try: from appJar.lib.tkinter_png import * TKINTERPNG_AVAILABLE = True except: TKINTERPNG_AVAILABLE = False try: from appJar.lib import nanojpeg NANOJPEG_AVAILABLE = True except: NANOJPEG_AVAILABLE = False # only try to import winsound if we're on windows if platform() in ["win32", "Windows"]: import winsound # details __author__ = "Richard Jarvis" __copyright__ = "Copyright 2016, Richard Jarvis" __credits__ = ["Graham Turner", "Sarah Murch"] __license__ = "GPL" __version__ = "0.1" __maintainer__ = "Richard Jarvis" __email__ = "info@appJar.info" __status__ = "Development" # class to allow simple creation of tkinter GUIs class gui(object): """ Class to represent the GUI - Create one of these - add some widgets - call the go() function """ @staticmethod def CLEAN_CONFIG_DICTIONARY(**kw): """ Used by all Classes to tidy up dictionaries passed into config functions Allows us to more quickly process the dictionaries when overriding config """ try: kw['bg'] = kw.pop('background') except: pass try: kw['fg'] = kw.pop('foreground') except: pass kw = dict((k.lower().strip(), v) for k, v in kw.items()) return kw # globals for supported platforms WINDOWS = 1 MAC = 2 LINUX = 3 @staticmethod def GET_PLATFORM(): # get the platform if platform() in ["win32", "Windows"]: return gui.WINDOWS elif platform() == "Darwin": return gui.MAC elif platform() == "Linux": return gui.LINUX else: raise Exception("Unsupported platform: " + platform()) @staticmethod def CENTER(win): """ centers a tkinter window http://stackoverflow.com/questions/3352918/ :param win: the root or Toplevel window to center """ win.attributes('-alpha', 0.0) # hide the window win.update_idletasks() width = win.winfo_width() frm_width = win.winfo_rootx() - win.winfo_x() win_width = width + 2 * frm_width height = win.winfo_height() titlebar_height = win.winfo_rooty() - win.winfo_y() win_height = height + titlebar_height + frm_width x = win.winfo_screenwidth() // 2 - win_width // 2 y = win.winfo_screenheight() // 2 - 
win_height // 2 y = y - 150 win.geometry('{}x{}+{}+{}'.format(width, height, x, y)) win.deiconify() win.attributes('-alpha', 1.0) built = False # used to identify widgets in component configurations WINDOW = 0 LABEL = 1 ENTRY = 2 BUTTON = 3 CHECKBOX = 4 SCALE = 5 RADIOBUTTON = 6 LISTBOX = 7 MESSAGE = 8 SPIN = 9 OPTION = 10 TEXTAREA = 11 LINK = 12 METER = 13 IMAGE = 14 PIECHART = 15 PROPERTIES = 16 GRID = 17 PLOT = 18 RB = 60 CB = 40 LB = 70 LABELFRAME = 30 FRAME = 36 TABBEDFRAME = 31 PANEDFRAME = 32 SCROLLPANE = 33 PAGEDWINDOW = 34 TOGGLEFRAME = 35 # positioning N = N NE = NE E = E SE = SE S = S SW = SW W = W NW = NW CENTER = CENTER LEFT = LEFT RIGHT = RIGHT # reliefs SUNKEN = SUNKEN RAISED = RAISED GROOVE = GROOVE RIDGE = RIDGE FLAT = FLAT # containers C_ROOT = 'rootPage' C_LABELFRAME = 'labelFrame' C_FRAME = 'frame' C_TOGGLEFRAME = 'toggleFrame' # 2 containers for pagedWindow C_PAGEDWINDOW = 'pagedWindow' C_PAGE = 'page' # 2 containers for tabbedFrame C_TABBEDFRAME = 'tabbedFrame' C_TAB = 'tab' # 2 containers for panedFrame C_PANEDFRAME = 'panedFrame' C_PANE = 'pane' C_SUBWINDOW = 'subWindow' C_SCROLLPANE = 'scrollPane' # names for each of the widgets defined above # used for defining functions WIDGETS = { LABEL: "Label", MESSAGE: "Message", BUTTON: "Button", ENTRY: "Entry", CB: "Cb", SCALE: "Scale", RB: "Rb", GRID: "Grid", LB: "Lb", SPIN: "SpinBox", OPTION: "OptionBox", TEXTAREA: "TextArea", LINK: "Link", METER: "Meter", PLOT: "Plot", IMAGE: "Image", RADIOBUTTON: "RadioButton", CHECKBOX: "CheckBox", LISTBOX: "ListBox", PIECHART: "PieChart", PROPERTIES: "Properties", FRAME: "Frame", LABELFRAME: "LabelFrame", PANEDFRAME: "PanedFrame", TOGGLEFRAME: "ToggleFrame", TABBEDFRAME: "TabbedFrame"} # music stuff BASIC_NOTES = { "A": 440, "B": 493, "C": 261, "D": 293, "E": 329, "F": 349, "G": 392} NOTES = { 'f8': 5587, 'c#6': 1108, 'f4': 349, 'c7': 2093, 'd#2': 77, 'g8': 6271, 'd4': 293, 'd7': 2349, 'd#7': 2489, 'g#4': 415, 'e7': 2637, 'd9': 9397, 'b8': 7902, 'a#4': 466, 'b5': 987, 'b2': 123, 'g#9': 13289, 'g9': 12543, 'f#2': 92, 'c4': 261, 'e1': 41, 'e6': 1318, 'a#8': 7458, 'c5': 523, 'd6': 1174, 'd3': 146, 'g7': 3135, 'd2': 73, 'd#3': 155, 'g#6': 1661, 'd#4': 311, 'a3': 219, 'g2': 97, 'c#5': 554, 'd#9': 9956, 'a8': 7040, 'a#5': 932, 'd#5': 622, 'a1': 54, 'g#8': 6644, 'a2': 109, 'g#5': 830, 'f3': 174, 'a6': 1760, 'e8': 5274, 'c#9': 8869, 'f5': 698, 'b1': 61, 'c#4': 277, 'f#9': 11839, 'e5': 659, 'f9': 11175, 'f#5': 739, 'a#1': 58, 'f#8': 5919, 'b7': 3951, 'c#8': 4434, 'g1': 48, 'c#3': 138, 'f#7': 2959, 'c6': 1046, 'c#2': 69, 'c#7': 2217, 'c3': 130, 'e9': 10548, 'c9': 8372, 'a#6': 1864, 'a#7': 3729, 'g#2': 103, 'f6': 1396, 'b3': 246, 'g#3': 207, 'b4': 493, 'a7': 3520, 'd#6': 1244, 'd#8': 4978, 'f2': 87, 'd5': 587, 'f7': 2793, 'f#6': 1479, 'g6': 1567, 'e3': 164, 'f#3': 184, 'g#1': 51, 'd8': 4698, 'f#4': 369, 'f1': 43, 'c8': 4186, 'g4': 391, 'g3': 195, 'a4': 440, 'a#3': 233, 'd#1': 38, 'e2': 82, 'e4': 329, 'a5': 880, 'a#2': 116, 'g5': 783, 'g#7': 3322, 'b6': 1975, 'c2': 65, 'f#1': 46} DURATIONS = { "BREVE": 2000, "SEMIBREVE": 1000, "MINIM": 500, "CROTCHET": 250, "QUAVER": 125, "SEMIQUAVER": 63, "DEMISEMIQUAVER": 32, "HEMIDEMISEMIQUAVER": 16} ##################################### # CONSTRUCTOR - creates the GUI ##################################### def __init__(self, title=None, geom=None, warn=True, debug=False): # first out, verify the platform self.platform = gui.GET_PLATFORM() self.WARN = warn self.DEBUG = debug # a stack to hold containers as being built # done here, as initArrays is called 
elsewhere - to reset the gubbins self.containerStack = [] # first up, set up all the data stores self.__initArrays() # dynamically create lots of functions for configuring stuff self.__buildConfigFuncs() # language parser self.config = None # set up some default path locations self.lib_file = os.path.abspath(__file__) self.exe_file = os.path.basename(theMain.__file__) self.exe_loc = os.path.dirname(theMain.__file__) # location of appJar self.lib_path = os.path.dirname(self.lib_file) self.resource_path = os.path.join(self.lib_path, "resources") self.icon_path = os.path.join(self.resource_path, "icons") self.sound_path = os.path.join(self.resource_path, "sounds") self.appJarIcon = os.path.join(self.icon_path, "favicon.ico") # user configurable self.userImages = self.exe_loc self.userSounds = self.exe_loc # create the main window - topLevel self.topLevel = Tk() self.topLevel.bind('<Configure>', self.__windowEvent) # override close button self.topLevel.protocol("WM_DELETE_WINDOW", self.stop) # temporarily hide it self.topLevel.withdraw() self.locationSet = False # used to keep a handle on the last pop-up dialog # allows the dialog to be closed remotely # mainly for test-automation self.topLevel.POP_UP = None # create a frame to store all the widgets self.appWindow = Frame(self.topLevel) self.appWindow.pack(fill=BOTH, expand=True) # set the windows title if title is None: title = self.exe_file self.setTitle(title) # configure the geometry of the window self.topLevel.escapeBindId = None # used to exit fullscreen self.topLevel.stopFunction = None # used to exit fullscreen self.setGeom(geom) # set the resize status - default to True self.setResizable(True) # set up fonts self.buttonFont = font.Font(family="Helvetica", size=12,) self.labelFont = font.Font(family="Helvetica", size=12) self.entryFont = font.Font(family="Helvetica", size=12) self.messageFont = font.Font(family="Helvetica", size=12) self.rbFont = font.Font(family="Helvetica", size=12) self.cbFont = font.Font(family="Helvetica", size=12) self.tbFont = font.Font(family="Helvetica", size=12) self.scaleFont = font.Font(family="Helvetica", size=12) self.statusFont = font.Font(family="Helvetica", size=12) self.spinFont = font.Font(family="Helvetica", size=12) self.optionFont = font.Font(family="Helvetica", size=12) self.lbFont = font.Font(family="Helvetica", size=12) self.taFont = font.Font(family="Helvetica", size=12) self.meterFont = font.Font(family="Helvetica", size=12, weight='bold') self.linkFont = font.Font( family="Helvetica", size=12, weight='bold', underline=1) self.labelFrameFont = font.Font(family="Helvetica", size=12) self.frameFont = font.Font(family="Helvetica", size=12) self.toggleFrameFont = font.Font(family="Helvetica", size=12) self.tabbedFrameFont = font.Font(family="Helvetica", size=12) self.panedFrameFont = font.Font(family="Helvetica", size=12) self.scrollPaneFont = font.Font(family="Helvetica", size=12) self.propertiesFont = font.Font(family="Helvetica", size=12) self.gridFont = font.Font(family="Helvetica", size=12) # self.fgColour = self.topLevel.cget("foreground") # self.buttonFgColour = self.topLevel.cget("foreground") # self.labelFgColour = self.topLevel.cget("foreground") # create a menu bar - only shows if populated # now created in menu functions, as it generated a blank line... 
self.hasMenu = False self.hasStatus = False self.hasTb = False self.copyAndPaste = CopyAndPaste(self.topLevel) # won't pack, if don't pack it here self.tb = Frame(self.appWindow, bd=1, relief=RAISED) self.tb.pack(side=TOP, fill=X) # create the main container for this GUI container = Frame(self.appWindow) # container = Label(self.appWindow) # made as a label, so we can set an # image container.config(padx=2, pady=2, background=self.topLevel.cget("bg")) container.pack(fill=BOTH, expand=True) self.__addContainer("root", self.C_ROOT, container, 0, 1) # set up the main container to be able to host an image self.__configBg(container) # an array to hold any threaded events.... self.events = [] self.pollTime = 250 self.built = True if self.platform == self.WINDOWS: try: self.topLevel.wm_iconbitmap(self.appJarIcon) except: # file not found self.debug("Error setting Windows default icon") def __configBg(self, container): # set up a background image holder # alternative to label option above, as label doesn't update widgets # properly self.bgLabel = Label(container) self.bgLabel.config( anchor=CENTER, font=self.labelFont, background=self.__getContainerBg()) self.bgLabel.place(x=0, y=0, relwidth=1, relheight=1) container.image = None ##################################### # set the arrays we use to store everything ##################################### def __initArrays(self): # set up a row counter - used to auto add rows # breaks once user sets own row # set up a minimum label width for label combos self.labWidth = 1 # validate function callbacks - used by numeric texts # created first time a widget is used self.validateNumeric = None self.validateSpinBox = None # set up flash variable self.doFlash = False # used to hide/show title bar self.hasTitleBar = True # records if we're in fullscreen - stops hideTitle from breaking self.isFullscreen = False # splash screen? 
self.splashConfig = None # collections of widgets, widget name is key self.n_frames = [] # un-named, so no direct access self.n_labels = {} self.n_buttons = {} self.n_entries = {} self.n_messages = {} self.n_scales = {} self.n_cbs = {} self.n_rbs = {} self.n_lbs = {} self.n_tbButts = {} self.n_spins = {} self.n_props = {} self.n_plots = {} self.n_options = {} self.n_frameLabs = {} self.n_textAreas = {} self.n_links = {} self.n_meters = {} self.n_subWindows = {} self.n_labelFrames = {} self.n_ajFrame = {} self.n_tabbedFrames = {} self.n_panedFrames = {} self.n_panes = {} self.n_pagedWindows = {} self.n_toggleFrames = {} self.n_scrollPanes = {} self.n_trees = {} self.n_flashLabs = [] self.n_pieCharts = {} self.n_separators = [] # completed containers - in case we want to open them again self.n_usedContainers = {} # variables associated with widgets self.n_entryVars = {} self.n_optionVars = {} self.n_boxVars = {} self.n_rbVars = {} self.n_rbVals = {} self.n_images = {} # image label widgets self.n_imageCache = {} # image file objects self.n_imageAnimationIds = {} # stores after ids self.n_taHashes = {} # for monitoring textAreas # for simple grids self.n_grids = {} # menu stuff self.n_menus = {} self.n_menuVars = {} self.n_accelerators = [] def setLanguage(self, language): try: from configparser import ConfigParser except: self.warn("Internationalisation not supported") self.config = None return self.config = ConfigParser() self.changeLanguage(language) # function to update languages def changeLanguage(self, language): if self.config is None: self.warn("Internationalisation not supported") return language = language.upper() import codecs if not PYTHON2: try: with codecs.open(language + ".ini", "r", "utf8") as langFile: self.config.read_file(langFile) except FileNotFoundError: self.warn("Invalid language: " + language) return else: try: with codecs.open(language + ".ini", "r", "utf8") as langFile: self.config.read_file(langFile) except IOError: self.warn("Invalid language: " + language) return self.debug("Switching to: " + language) # loop through each section, get the relative set of widgets # change the text for section in self.config.sections(): section = section.upper() # skip the config section (for now) if section == "CONFIG": continue self.debug("\t" + section) # convert the section title to its code try: kind = vars(gui)[section] texts = self.config[section] except KeyError: self.warn("Invalid config section: " + section) continue # use the code to get the widget list widgets = self.__getItems(kind) if kind in [self.SCALE]: self.warn("No text is displayed in " + section + ". 
Maybe it has a Label?") continue elif kind in [self.TEXTAREA, self.METER]: self.warn("No text is displayed in " + section) continue elif kind in [self.PROPERTIES]: self.warn(section + " - list-style widgets are currently not supported") elif kind in [self.LISTBOX]: for k in widgets.keys(): lb = widgets[k] # convert data to a list data = texts.get(k, lb.DEFAULT_TEXT).strip().split("\n") # tidy up the list data = [item.strip() for item in data if len(item.strip()) > 0] self.updateListItems(k, data) elif kind in [self.SPIN]: for k in widgets.keys(): sb = widgets[k] # convert data to a list data = texts.get(k, sb.DEFAULT_TEXT).strip().split("\n") # tidy up the list data = [item.strip() for item in data if len(item.strip()) > 0] self.changeSpinBox(k, data) elif kind in [self.OPTION]: for k in widgets.keys(): ob = widgets[k] # convert data to a list data = texts.get(k, ob.DEFAULT_TEXT).strip().split("\n") # tidy up the list data = [item.strip() for item in data if len(item.strip()) > 0] self.changeOptionBox(k, data) elif kind in [self.RADIOBUTTON]: for (key, val) in self.config.items(section): keys = key.split("-") try: rbs = self.n_rbs[keys[0]] except KeyError: self.warn("Invalid RADIOBUTTON key: " + keys[0]) continue for rb in rbs: if rb.DEFAULT_TEXT == keys[1]: rb["text"] = val break elif kind in [self.PIECHART, self.GRID]: self.warn(section + " - widgets not yet implemented") continue elif kind == self.ENTRY: for k in widgets.keys(): ent = widgets[k] self.updateDefaultText(k, texts.get(k, ent.DEFAULT_TEXT)) self.debug("\t\t" + k + "=" + str(ent.default)) elif kind in [self.LABEL, self.BUTTON, self.CHECKBOX, self.MESSAGE, self.LINK]: # relabel each widget for k in widgets.keys(): widg = widgets[k] self.debug("\t\t" + k + "---->" + texts.get(k, widg.DEFAULT_TEXT)) widg.config(text = texts.get(k, widg.DEFAULT_TEXT)) self.debug("\t\t" + k + "=" + widg.cget("text")) else: self.warn("Unsupported widget: " + section) continue # function to generate warning messages def warn(self, message): if self.WARN: print("Warning -", message) # function to turn off warning messages def disableWarnings(self): self.WARN = False def enableWarnings(self): self.WARN = True # function to generate warning messages def debug(self, message): if self.DEBUG: print("Debug -", message) # function to turn on debug messages def enableDebug(self): self.DEBUG = True def disableDebug(self): self.DEBUG = False # function to turn on the splash screen def showSplash(self, text="appJar", fill="red", stripe="black", fg="white", font=44): self.splashConfig= {'text':text, 'fill':fill, 'stripe':stripe, 'fg':fg, 'font':font} ##################################### # Event Loop - must always be called at end ##################################### def go(self, language=None): """ Most important function! 
Start the GUI """ if self.splashConfig is not None: splash = SplashScreen( self.topLevel, self.splashConfig['text'], self.splashConfig['fill'], self.splashConfig['stripe'], self.splashConfig['fg'], self.splashConfig['font'] ) self.topLevel.withdraw() self.__bringToFront(splash) # if language is populated, we are in internationalisation mode # call the setLanguage function - to re-badge all the widgets if language is not None: self.setLanguage(language) # check the containers have all been stopped if len(self.containerStack) > 1: self.warn("You didn't stop all containers") for i in range(len(self.containerStack) - 1, 0, -1): kind = self.containerStack[i]['type'] if kind not in [self.C_PANE]: self.warn("STOP: " + kind) if len(self.n_trees) > 0: for k in self.n_trees: self.n_trees[k].update() self.n_trees[k].expand() # create appJar menu, if no menuBar created if not self.hasMenu: self.addAppJarMenu() if self.platform == self.WINDOWS: self.menuBar.add_cascade(menu=self.n_menus["WIN_SYS"]) self.topLevel.config(menu=self.menuBar) # pack it all in & make sure it's drawn self.appWindow.pack(fill=BOTH) self.topLevel.update_idletasks() # check geom is set and set a minimum size, also positions the window # if necessary self.__dimensionWindow() if self.splashConfig is not None: time.sleep(3) splash.destroy() # bring to front self.__bringToFront() self.topLevel.deiconify() # required to make the gui reopen after minimising if self.GET_PLATFORM() == self.MAC: self.topLevel.createcommand( 'tk::mac::ReopenApplication', self.topLevel.deiconify) # start the call back & flash loops self.__poll() self.__flash() # start the main loop try: self.topLevel.mainloop() except(KeyboardInterrupt, SystemExit): self.stop() def setStopFunction(self, function): """ set a function to call when the GUI is quit. Must return True or False """ tl = self.__getTopLevel() tl.stopFunction = function # link to exit item in topMenu # only if in root if self.containerStack[-1]['type'] != self.C_SUBWINDOW: tl.createcommand('exit', self.stop) def stop(self, event=None): """ Closes the GUI. If a stop function is set, will only close the GUI if True """ theFunc = self.__getTopLevel().stopFunction if theFunc is None or theFunc(): # stop the after loops self.topLevel.after_cancel(self.pollId) self.topLevel.after_cancel(self.flashId) self.topLevel.after_cancel(self.preloadAnimatedImageId) # stop any animations for key in self.n_imageAnimationIds: self.topLevel.after_cancel(self.n_imageAnimationIds[key]) # stop any sounds, ignore error when not on Windows try: self.stopSound() except: pass self.topLevel.quit() self.topLevel.destroy() ##################################### # Functions for configuring polling events ##################################### # events will fire in order of being added, after sleeping for time def setPollTime(self, time): """ Set a frequency for executing queued functions """ self.pollTime = time # register events to be called by the sleep timer def registerEvent(self, func): """ Queue a function, to be executed every poll time """ self.events.append(func) # internal function, called by 'after' function, after sleeping def __poll(self): # run any registered actions for e in self.events: # execute the event e() self.pollId = self.topLevel.after(self.pollTime, self.__poll) # not used now, but called every time window is resized # may be used in the future... 
def __windowEvent(self, event): new_width = self.topLevel.winfo_width() new_height = self.topLevel.winfo_height() self.debug("Window resized: " + str(new_width) + "x" + str(new_height)) # will call the specified function when enter key is pressed def enableEnter(self, func): """ Binds <Return> to the specified function - all widgets """ self.bindKey("<Return>", func) def disableEnter(self): """ unbinds <enter> from all widgets """ self.unbindKey("<Return>") def bindKey(self, key, func): """ bind the specified key, to the specified function, for all widgets """ # for now discard the Event... myF = self.MAKE_FUNC(func, key, True) self.__getTopLevel().bind(key, myF) def unbindKey(self, key): """ unbinds the specified key from whatever functions it os bound to """ self.__getTopLevel().unbind(key) # helper - will see if the mouse is in the specified widget def __isMouseInWidget(self, w): l_x = w.winfo_rootx() l_y = w.winfo_rooty() if l_x <= w.winfo_pointerx() <= l_x + \ w.winfo_width() and l_y <= w.winfo_pointery() <= l_y + w.winfo_height(): return True else: return False # function to give a clicked widget the keyboard focus def __grabFocus(self, e): e.widget.focus_set() ##################################### # FUNCTIONS for configuring GUI settings ##################################### # set a minimum size def __dimensionWindow(self): self.topLevel.update_idletasks() if self.__getTopLevel().geom != "fullscreen": # ISSUES HERE: # on MAC & LINUX, w_width/w_height always 1 # on WIN, w_height is bigger then r_height - leaving empty space # get the apps requested width & height r_width = self.__getTopLevel().winfo_reqwidth() r_height = self.__getTopLevel().winfo_reqheight() # get the current width & height w_width = self.__getTopLevel().winfo_width() w_height = self.__getTopLevel().winfo_height() # get the window's width & height m_width = self.topLevel.winfo_screenwidth() m_height = self.topLevel.winfo_screenheight() # determine best geom for OS if self.platform in [self.MAC, self.LINUX]: b_width = r_width b_height = r_height elif self.platform == self.WINDOWS: b_height = min(r_height, w_height) b_width = min(r_width, w_width) h_height = max(r_height, w_height) h_width = max(r_width, w_width) # if a geom has not ben set if self.__getTopLevel().geom is None: width = b_width height = b_height # store it in the app's geom self.__getTopLevel().geom = str(width) + "x" + str(height) else: # now split the app's geom width = int(self.__getTopLevel().geom.lower().split("x")[0]) height = int(self.__getTopLevel().geom.lower().split("x")[1]) # warn the user that their geom is not big enough if width < b_width or height < b_height: self.warn( "Specified dimensions (" + self.__getTopLevel().geom + "), less than requested dimensions (" + str(b_width) + "x" + str(b_height) + ")") # and set it as the minimum size self.__getTopLevel().minsize(width, height) # if the window hasn't been positioned by the user, put it in the # middle if not self.locationSet: if self.platform == self.WINDOWS: x = (m_width - h_width) / 2 y = (m_height - h_height) / 2 elif self.platform in [self.MAC, self.LINUX]: x = (m_width - width) / 2 y = (m_height - height) / 2 self.setLocation(x, y) # called to update screen geometry def setGeometry(self, geom, height=None): self.setGeom(geom, height) def setGeom(self, geom, height=None): if height is not None: geom = str(geom) + "x" + str(height) container = self.__getTopLevel() container.geom = geom if container.geom == "fullscreen": self.setFullscreen() else: self.exitFullscreen() if 
container.geom is not None: container.geometry(container.geom) # called to set screen position def setLocation(self, x, y): # get the window's width & height m_width = self.topLevel.winfo_screenwidth() m_height = self.topLevel.winfo_screenheight() if x < 0 or x > m_width or y < 0 or y > m_height: self.warn( "Invalid location: " + str(x) + ", " + str(y) + " - ignoring") return if self.containerStack[-1]['type'] != self.C_SUBWINDOW: self.locationSet = True self.__getTopLevel().geometry("+%d+%d" % (x, y)) # called to make sure this window is on top def __bringToFront(self, win=None): if win is None: win = self.topLevel if self.platform == self.MAC: import subprocess tmpl = 'tell application "System Events" to set frontmost of every process whose unix id is {} to true' script = tmpl.format(os.getpid()) subprocess.check_call(['/usr/bin/osascript', '-e', script]) win.after( 0, lambda: win.attributes("-topmost", False)) # val=os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "''' + PY_NAME + '''" to true' ''') win.lift() elif self.platform == self.WINDOWS: win.lift() elif self.platform == self.LINUX: win.lift() def setFullscreen(self, container=None): if not self.isFullscreen: self.isFullscreen = True if container is None: container = self.__getTopLevel() container.attributes('-fullscreen', True) container.escapeBindId = container.bind( '<Escape>', self.MAKE_FUNC( self.exitFullscreen, container, True), "+") # function to turn off fullscreen mode def exitFullscreen(self, container=None): if self.isFullscreen: self.isFullscreen = False if container is None: container = self.__getTopLevel() container.attributes('-fullscreen', False) if container.escapeBindId is not None: container.unbind('<Escape>', container.escapeBindId) myWarn = self.__pauseWarn() self.__doTitleBar() self.__resumeWarn(myWarn) return True else: return False def __pauseWarn(self): myWarn = self.WARN self.WARN = False return myWarn def __resumeWarn(self, myWarn): self.WARN = myWarn # set the current container's external grid padding def setPadX(self, x=0): self.containerStack[-1]['padx'] = x def setPadY(self, y=0): self.containerStack[-1]['pady'] = y # sets the padding around the border of the root container def setPadding(self, x, y=None): if y is None: if isinstance(x, list): self.containerStack[-1]['padx'] = x[0] self.containerStack[-1]['pady'] = x[1] else: self.containerStack[-1]['padx'] = x self.containerStack[-1]['pady'] = y def setGuiPadding(self, x, y=None): if y is None: if isinstance(x, list): self.containerStack[0]['container'].config(padx=x[0], pady=x[1]) else: self.containerStack[0]['container'].config(padx=x, pady=y) # sets the current containers internal padding def setIPadX(self, x=0): self.setInPadX(x) def setIPadY(self, y=0): self.setInPadY(y) def setIPadding(self, x, y=None): self.setInPadding(x, y) def setInPadX(self, x=0): self.containerStack[-1]['ipadx'] = x def setInPadY(self, y=0): self.containerStack[-1]['ipady'] = y def setInPadding(self, x, y=None): if y is None: if isinstance(x, list): self.containerStack[-1]['ipadx'] = x[0] self.containerStack[-1]['ipady'] = x[1] else: self.containerStack[-1]['ipadx'] = x self.containerStack[-1]['ipady'] = y # set an override sticky for this container def setSticky(self, sticky): self.containerStack[-1]['sticky'] = sticky # this tells widgets what to do when GUI is resized def setStretch(self, exp): self.setExpand(exp) def setExpand(self, exp): if exp.lower() == "none": self.containerStack[-1]['expand'] = "NONE" elif exp.lower() == "row": 
            self.containerStack[-1]['expand'] = "ROW"
        elif exp.lower() == "column":
            self.containerStack[-1]['expand'] = "COLUMN"
        else:
            self.containerStack[-1]['expand'] = "ALL"

    def getFonts(self):
        # sort() sorts in place and returns None - return the sorted list instead
        return sorted(font.families())

    def increaseButtonFont(self):
        self.setButtonFont(self.buttonFont['size'] + 1)

    def decreaseButtonFont(self):
        self.setButtonFont(self.buttonFont['size'] - 1)

    def setButtonFont(self, size, font=None):
        if font is None:
            font = self.buttonFont['family']
        self.buttonFont.config(family=font, size=size)

    def increaseLabelFont(self):
        self.setLabelFont(self.labelFont['size'] + 1)

    def decreaseLabelFont(self):
        self.setLabelFont(self.labelFont['size'] - 1)

    def setLabelFont(self, size, font=None):
        if font is None:
            font = self.labelFont['family']
        self.labelFont.config(family=font, size=size)
        self.entryFont.config(family=font, size=size)
        self.rbFont.config(family=font, size=size)
        self.cbFont.config(family=font, size=size)
        self.scaleFont.config(family=font, size=size)
        self.messageFont.config(family=font, size=size)
        self.spinFont.config(family=font, size=size)
        self.optionFont.config(family=font, size=size)
        self.lbFont.config(family=font, size=size)
        self.taFont.config(family=font, size=size)
        self.linkFont.config(family=font, size=size)
        self.meterFont.config(family=font, size=size)
        self.propertiesFont.config(family=font, size=size)
        self.labelFrameFont.config(family=font, size=size)
        self.frameFont.config(family=font, size=size)
        self.toggleFrameFont.config(family=font, size=size)
        self.tabbedFrameFont.config(family=font, size=size)
        self.panedFrameFont.config(family=font, size=size)
        self.scrollPaneFont.config(family=font, size=size)
        self.gridFont.config(family=font, size=size)

        # need a better way to register font change events on grids
        for grid in self.n_grids:
            self.n_grids[grid].config(font=self.gridFont)

    def increaseFont(self):
        self.increaseLabelFont()
        self.increaseButtonFont()

    def decreaseFont(self):
        self.decreaseLabelFont()
        self.decreaseButtonFont()

    def setFont(self, size, font=None):
        self.setLabelFont(size, font)
        self.setButtonFont(size, font)

    # need to set a default colour for container
    # then populate that field
    # then use & update that field accordingly
    # all widgets will then need to use it
    # and here we update all....
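    # Example use of the font/colour helpers in this section - an illustrative
    # sketch only, kept as comments so the class body is unchanged (assumes
    # "app" is a gui() instance and that addLabel()/go(), defined elsewhere in
    # this class, are available):
    #
    #   app = gui("Font demo")
    #   app.addLabel("l1", "Hello")
    #   app.setFont(14)                  # label + button font families together
    #   app.setLabelFont(16, "Courier")  # labels (and linked widgets) only
    #   app.setBg("lightblue")           # see setBg()/setFg() below
    #   app.go()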
def setFg(self, colour): self.SET_WIDGET_FG(self.containerStack[-1]['container'], colour, True) # self.topLevel = Tk() # self.appWindow = Frame, fills all of self.topLevel # self.tb = Frame, at top of appWindow # self.container = Frame, at bottom of appWindow => C_ROOT container # self.bglabel = Label, filling all of container def setBg(self, colour): if self.containerStack[-1]['type'] == self.C_ROOT: self.appWindow.config(background=colour) self.bgLabel.config(background=colour) self.containerStack[-1]['container'].config(background=colour) for child in self.containerStack[-1]['container'].winfo_children(): if not self.__isWidgetContainer(child): gui.SET_WIDGET_BG(child, colour) def __isWidgetContainer(self, widget): try: if widget.isContainer: return True except: pass return False def setResizable(self, canResize=True): self.__getTopLevel().isResizable = canResize if self.__getTopLevel().isResizable: self.__getTopLevel().resizable(True, True) else: self.__getTopLevel().resizable(False, False) def getResizable(self): return self.__getTopLevel().isResizable def __doTitleBar(self): if self.platform == self.MAC: self.warn( "Title bar hiding doesn't work on MAC - app may become unresponsive.") elif self.platform == self.LINUX: self.warn( "Title bar hiding doesn't work on LINUX - app may become unresponsive.") self.__getTopLevel().overrideredirect(not self.hasTitleBar) def hideTitleBar(self): self.hasTitleBar = False self.__doTitleBar() def showTitleBar(self): self.hasTitleBar = True self.__doTitleBar() # function to set the window's title def setTitle(self, title): self.__getTopLevel().title(title) # set an icon def setIcon(self, image): container = self.__getTopLevel() if image.endswith('.ico'): container.wm_iconbitmap(image) else: icon = self.__getImage(image) container.iconphoto(True, icon) def __getTopLevel(self): if len( self.containerStack) > 1 and self.containerStack[-1]['type'] == self.C_SUBWINDOW: return self.containerStack[-1]['container'] else: return self.topLevel # make the window transparent (between 0 & 1) def setTransparency(self, percentage): if self.platform == self.LINUX: self.warn("Transparency not supported on LINUX") else: if percentage > 1: percentage = percentage / 100 self.__getTopLevel().attributes("-alpha", percentage) ############################## # functions to deal with tabbing and right clicking ############################## def __focusNextWindow(self, event): event.widget.tk_focusNext().focus_set() nowFocus = self.topLevel.focus_get() if isinstance(nowFocus, Entry): nowFocus.select_range(0, END) return("break") def __focusLastWindow(self, event): event.widget.tk_focusPrev().focus_set() nowFocus = self.topLevel.focus_get() if isinstance(nowFocus, Entry): nowFocus.select_range(0, END) return("break") # creates relevant bindings on the widget def __addRightClickMenu(self, widget): widget.bind("<FocusIn>", self.__checkCopyAndPaste, add="+") widget.bind("<FocusOut>", self.__checkCopyAndPaste, add="+") if widget.var is None: # TEXT: widget.bind('<KeyRelease>', self.__checkCopyAndPaste) widget.bind('<<Paste>>', self.__checkCopyAndPaste) else: widget.var.trace( "w", lambda name, index, mode, e=None, w=widget: self.__checkCopyAndPaste( e, w)) # ENTRY/OPTION if self.platform in [self.WINDOWS, self.LINUX]: widget.bind('<Button-3>', self.__rightClick) else: widget.bind('<Button-2>', self.__rightClick) def __rightClick(self, event, menu="EDIT"): event.widget.focus() if menu == "EDIT": if self.__checkCopyAndPaste(event): self.n_menus[menu].tk_popup( event.x_root - 10, 
event.y_root - 10) else: self.n_menus[menu].tk_popup(event.x_root - 10, event.y_root - 10) return "break" ##################################### # FUNCTION to configure widgets ##################################### def __getItems(self, kind): if kind == self.LABEL: return self.n_labels elif kind == self.MESSAGE: return self.n_messages elif kind == self.BUTTON: return self.n_buttons elif kind == self.ENTRY: return self.n_entries elif kind == self.SCALE: return self.n_scales elif kind in [self.CB, self.CHECKBOX]: return self.n_cbs elif kind in [self.RB, self.RADIOBUTTON]: return self.n_rbs elif kind in [self.LB, self.LISTBOX]: return self.n_lbs elif kind == self.SPIN: return self.n_spins elif kind == self.OPTION: return self.n_options elif kind == self.TEXTAREA: return self.n_textAreas elif kind == self.LINK: return self.n_links elif kind == self.METER: return self.n_meters elif kind == self.IMAGE: return self.n_images elif kind == self.PIECHART: return self.n_pieCharts elif kind == self.PROPERTIES: return self.n_props elif kind == self.PLOT: return self.n_plots elif kind == self.GRID: return self.n_grids elif kind in [ self.LABELFRAME, self.C_LABELFRAME ]: return self.n_labelFrames elif kind in [ self.FRAME, self.C_FRAME ]: return self.n_ajFrame elif kind in [ self.TOGGLEFRAME, self.C_TOGGLEFRAME ]: return self.n_toggleFrames elif kind in [ self.PAGEDWINDOW, self.C_PAGEDWINDOW ]: return self.n_pagedWindows elif kind in [ self.C_PAGE ]: # no dict of pages - the container manages them... return self.n_pagedWindows elif kind in [ self.TABBEDFRAME, self.C_TABBEDFRAME ]: return self.n_tabbedFrames elif kind in [ self.C_TAB ]: # no dict of tabs - the container manages them... return self.n_tabbedFrames elif kind in [ self.PANEDFRAME ]: return self.n_panedFrames elif kind in [ self.PANE, self.C_PANE ]: return self.n_panes elif kind in [ self.C_SUBWINDOW ]: return self.n_subWindows elif kind in [ self.SCROLLPANE, self.C_SCROLLPANE ]: return self.n_scrollPanes else: raise Exception("Unknown widget type: " + str(kind)) def configureAllWidgets(self, kind, option, value): items = list(self.__getItems(kind)) self.configureWidgets(kind, items, option, value) def configureWidgets(self, kind, names, option, value): if not isinstance(names, list): self.configureWidget(kind, names, option, value) else: for widg in names: # incase 2D array, eg. 
buttons if isinstance(widg, list): for widg2 in widg: self.configureWidget(kind, widg2, option, value) else: self.configureWidget(kind, widg, option, value) def getWidget(self, kind, name): # get the list of items for this type, and validate the widget is in # the list items = self.__getItems(kind) return self.__verifyItem(items, name, False) def configureWidget( self, kind, name, option, value, key=None, deprecated=False): # warn about deprecated functions if deprecated: self.warn( "Deprecated config function (" + option + ") used for: " + self.WIDGETS[kind] + "->" + name + " use " + deprecated + " instead") if kind in [self.RB, self.LB, self.CB]: self.warn( "Deprecated config function (" + option + ") used for: " + self.WIDGETS[kind] + "->" + name + " use " + self.WIDGETS[ kind / 10] + " instead") # get the list of items for this type, and validate the widgetis in the # list items = self.__getItems(kind) self.__verifyItem(items, name) if kind in [self.RB, self.RADIOBUTTON]: items = items[name] else: items = [items[name]] # loop through each item, and try to reconfigure it # this will often fail - widgets have varied config options for item in items: try: if option == 'background': if kind == self.METER: item.config(bg=value) else: gui.SET_WIDGET_BG(item, value, True) elif option == 'foreground': if kind == self.ENTRY: if item.showingDefault: item.oldFg = value else: item.config(foreground=value) item.oldFg = value else: item.config(foreground=value) elif option == 'disabledforeground': item.config(disabledforeground=value) elif option == 'disabledbackground': item.config(disabledbackground=value) elif option == 'activeforeground': item.config(activeforeground=value) elif option == 'activebackground': item.config(activebackground=value) elif option == 'inactiveforeground': if kind == self.TABBEDFRAME: item.config(inactiveforeground=value) else: self.warn("Error configuring " + name + ": can't set inactiveforeground") elif option == 'inactivebackground': if kind == self.TABBEDFRAME: item.config(inactivebackground=value) else: self.warn("Error configuring " + name + ": can't set inactivebackground") elif option == 'width': item.config(width=value) elif option == 'height': item.config(height=value) elif option == 'state': # make entries readonly - can still copy/paste if value == "disabled" and kind == self.ENTRY: value = "readonly" item.config(state=value) elif option == 'relief': item.config(relief=value) elif option == 'align': if kind == self.ENTRY: if value == W or value == LEFT: value = LEFT elif value == E or value == RIGHT: value = RIGHT item.config(justify=value) else: if value == LEFT: value = "w" elif value == RIGHT: value = "e" item.config(anchor=value) elif option == 'anchor': item.config(anchor=value) elif option == 'cursor': item.config(cursor=value) elif option == 'tooltip': self.__addTooltip(item, value) elif option == "focus": item.focus_set() elif option == 'over': if not isinstance(value, list): value = [value] if len(value) == 1: value.append(None) if len(value) != 2: raise Exception( "Invalid arguments, set<widget>OverFunction requires 1 ot 2 functions to be passed in.") if kind == self.LABEL: if value[0] is not None: item.bind( "<Enter>", self.MAKE_FUNC( value[0], name, True), add="+") if value[1] is not None: item.bind( "<Leave>", self.MAKE_FUNC( value[1], name, True), add="+") #item.bind("<B1-Motion>",self.MAKE_FUNC(value[0], name, True), add="+") elif option == 'drag': if not isinstance(value, list): value = [value] if len(value) == 1: value.append(None) if 
len(value) != 2: raise Exception( "Invalid arguments, set<widget>DragFunction requires 1 ot 2 functions to be passed in.") if kind == self.LABEL: item.config(cursor="fleur") def getLabel(f): # loop through all labels for key, value in self.n_labels.items(): if self.__isMouseInWidget(value): f(key) return if value[0] is not None: item.bind( "<ButtonPress-1>", self.MAKE_FUNC( value[0], name, True), add="+") if value[1] is not None: item.bind( "<ButtonRelease-1>", self.MAKE_FUNC( getLabel, value[1], True), add="+") elif option == 'command': # this will discard the scale value, as default function # can't handle it if kind == self.SCALE: cmd = self.MAKE_FUNC(value, name, True) item.config(command=cmd) item.cmd = cmd elif kind == self.OPTION: # need to trace the variable?? cmd = self.MAKE_FUNC(value, name, True) item.var.trace('w', cmd) item.cmd = cmd elif kind == self.ENTRY: if key is None: key = name cmd = self.MAKE_FUNC(value, key, True) item.bind('<Return>', cmd) item.cmd = cmd elif kind == self.BUTTON: item.config(command=self.MAKE_FUNC(value, name)) item.bind( '<Return>', self.MAKE_FUNC( value, name, True)) # make labels clickable, add a cursor, and change the look elif kind == self.LABEL or kind == self.IMAGE: if self.platform == self.MAC: item.config(cursor="pointinghand") elif self.platform in [self.WINDOWS, self.LINUX]: item.config(cursor="hand2") cmd = self.MAKE_FUNC(value, name, True) item.bind("<Button-1>", cmd, add="+") item.cmd = cmd # these look good, but break when dialogs take focus #up = item.cget("relief").lower() # down="sunken" # make it look like it's pressed #item.bind("<Button-1>",lambda e: item.config(relief=down), add="+") #item.bind("<ButtonRelease-1>",lambda e: item.config(relief=up)) elif kind == self.LISTBOX: cmd = self.MAKE_FUNC(value, name, True) item.bind('<<ListboxSelect>>', cmd) item.cmd = cmd else: cmd = self.MAKE_FUNC(value, name) item.config(command=cmd) item.cmd = cmd elif option == 'sticky': info = {} # need to reposition the widget in its grid if self.__widgetHasContainer(kind, item): # pack uses LEFT & RIGHT & BOTH info["side"] = value if value.lower() == "both": info["expand"] = 1 info["side"] = "right" else: info["expand"] = 0 else: # grid uses E+W if value.lower() == "left": side = W elif value.lower() == "right": side = E elif value.lower() == "both": side = W + E else: side = value.upper() info["sticky"] = side self.__repackWidget(item, info) elif option == 'padding': if value[1] is None: item.config(padx=value[0][0], pady=value[0][1]) else: item.config(padx=value[0], pady=value[1]) elif option == 'ipadding': if value[1] is None: item.config(ipadx=value[0][0], ipady=value[0][1]) else: item.config(ipadx=value[0], ipady=value[1]) elif option == 'rightClick': if self.platform in [self.WINDOWS, self.LINUX]: item.bind( '<Button-3>', lambda e, menu=value: self.__rightClick( e, menu)) else: item.bind( '<Button-2>', lambda e, menu=value: self.__rightClick( e, menu)) except TclError as e: self.warn("Error configuring " + name + ": " + str(e)) # dynamic way to create the configuration functions def __buildConfigFuncs(self): # loop through all the available widgets # and make all the below functons for each one for k, v in self.WIDGETS.items(): exec( "def set" + v + "Bg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'background', val)") exec("gui.set" + v + "Bg=set" + v + "Bg") exec( "def set" + v + "Fg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'foreground', val)") exec("gui.set" + v + "Fg=set" + v + "Fg") exec( "def set" + 
v + "DisabledFg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'disabledforeground', val)") exec("gui.set" + v + "DisabledFg=set" + v + "DisabledFg") exec( "def set" + v + "DisabledBg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'disabledbackground', val)") exec("gui.set" + v + "DisabledBg=set" + v + "DisabledBg") exec( "def set" + v + "ActiveFg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'activeforeground', val)") exec("gui.set" + v + "ActiveFg=set" + v + "ActiveFg") exec( "def set" + v + "ActiveBg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'activebackground', val)") exec("gui.set" + v + "ActiveBg=set" + v + "ActiveBg") exec( "def set" + v + "InactiveFg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'inactiveforeground', val)") exec("gui.set" + v + "InactiveFg=set" + v + "InactiveFg") exec( "def set" + v + "InactiveBg(self, name, val): self.configureWidgets(" + str(k) + ", name, 'inactivebackground', val)") exec("gui.set" + v + "InactiveBg=set" + v + "InactiveBg") exec( "def set" + v + "Width(self, name, val): self.configureWidgets(" + str(k) + ", name, 'width', val)") exec("gui.set" + v + "Width=set" + v + "Width") exec( "def set" + v + "Height(self, name, val): self.configureWidgets(" + str(k) + ", name, 'height', val)") exec("gui.set" + v + "Height=set" + v + "Height") exec( "def set" + v + "State(self, name, val): self.configureWidgets(" + str(k) + ", name, 'state', val)") exec("gui.set" + v + "State=set" + v + "State") exec( "def set" + v + "Padding(self, name, x, y=None): self.configureWidgets(" + str(k) + ", name, 'padding', [x, y])") exec("gui.set" + v + "Padding=set" + v + "Padding") exec( "def set" + v + "IPadding(self, name, x, y=None): self.configureWidgets(" + str(k) + ", name, 'ipadding', [x, y])") exec("gui.set" + v + "IPadding=set" + v + "IPadding") exec( "def set" + v + "InPadding(self, name, x, y=None): self.configureWidgets(" + str(k) + ", name, 'ipadding', [x, y])") exec("gui.set" + v + "InPadding=set" + v + "InPadding") # might not all be necessary, could make exclusion list exec( "def set" + v + "Relief(self, name, val): self.configureWidget(" + str(k) + ", name, 'relief', val)") exec("gui.set" + v + "Relief=set" + v + "Relief") exec( "def set" + v + "Align(self, name, val): self.configureWidget(" + str(k) + ", name, 'align', val)") exec("gui.set" + v + "Align=set" + v + "Align") exec( "def set" + v + "Anchor(self, name, val): self.configureWidget(" + str(k) + ", name, 'anchor', val)") exec("gui.set" + v + "Anchor=set" + v + "Anchor") exec( "def set" + v + "Tooltip(self, name, val): self.configureWidget(" + str(k) + ", name, 'tooltip', val)") exec("gui.set" + v + "Tooltip=set" + v + "Tooltip") exec( "def set" + v + "Function(self, name, val, key=None): self.configureWidget(" + str(k) + ", name, 'command', val, key)") exec("gui.set" + v + "Function=set" + v + "Function") exec( "def set" + v + "DragFunction(self, name, val): self.configureWidget(" + str(k) + ", name, 'drag', val)") exec("gui.set" + v + "DragFunction=set" + v + "DragFunction") exec( "def set" + v + "OverFunction(self, name, val): self.configureWidget(" + str(k) + ", name, 'over', val)") exec("gui.set" + v + "OverFunction=set" + v + "OverFunction") # deprecated, but left in for backwards compatability exec( "def set" + v + "Command(self, name, val, key=None): self.configureWidget(" + str(k) + ", name, 'command', val, key, deprecated='Function')") exec("gui.set" + v + "Command=set" + v + "Command") exec( "def set" + v + 
"Func(self, name, val, key=None): self.configureWidget(" + str(k) + ", name, 'command', val, key, deprecated='Function')") exec("gui.set" + v + "Func=set" + v + "Func") # end deprecated # http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/cursors.html exec( "def set" + v + "Cursor(self, name, val): self.configureWidget(" + str(k) + ", name, 'cursor', val)") exec("gui.set" + v + "Cursor=set" + v + "Cursor") exec( "def set" + v + "Focus(self, name): self.configureWidget(" + str(k) + ", name, 'focus', None)") exec("gui.set" + v + "Focus=set" + v + "Focus") # change the stickyness exec( "def set" + v + "Sticky(self, name, pos): self.configureWidget(" + str(k) + ", name, 'sticky', pos)") exec("gui.set" + v + "Sticky=set" + v + "Sticky") # add right click exec( "def set" + v + "RightClick(self, name, menu): self.configureWidget(" + str(k) + ", name, 'rightClick', menu)") exec("gui.set" + v + "RightClick=set" + v + "RightClick") # functions to manage widgets exec( "def show" + v + "(self, name): self.showWidget(" + str(k) + ", name)") exec("gui.show" + v + "=show" + v) exec( "def hide" + v + "(self, name): self.hideWidget(" + str(k) + ", name)") exec("gui.hide" + v + "=hide" + v) exec( "def remove" + v + "(self, name): self.removeWidget(" + str(k) + ", name)") exec("gui.remove" + v + "=remove" + v) # convenience functions for enable/disable # might not all be necessary, could make exclusion list exec( "def enable" + v + "(self, name): self.configureWidget(" + str(k) + ", name, 'state', 'normal')") exec("gui.enable" + v + "=enable" + v) exec( "def disable" + v + "(self, name): self.configureWidget(" + str(k) + ", name, 'state', 'disabled')") exec("gui.disable" + v + "=disable" + v) # group functions exec( "def set" + v + "Widths(self, names, val): self.configureWidgets(" + str(k) + ", names, 'width', val)") exec("gui.set" + v + "Widths=set" + v + "Widths") exec( "def setAll" + v + "Widths(self, val): self.configureAllWidgets(" + str(k) + ", 'width', val)") exec("gui.setAll" + v + "Widths=setAll" + v + "Widths") exec( "def set" + v + "Heights(self, names, val): self.configureWidgets(" + str(k) + ", names, 'height', val)") exec("gui.set" + v + "Heights=set" + v + "Heights") exec( "def setAll" + v + "Heights(self, val): self.configureAllWidgets(" + str(k) + ", 'height', val)") exec("gui.setAll" + v + "Heights=setAll" + v + "Heights") exec( "def get" + v + "Widget(self, name): return self.getWidget(" + str(k) + ", name)") exec("gui.get" + v + "Widget=get" + v + "Widget") ##################################### #  FUNCTION to hide/show/remove widgets ##################################### def __widgetHasContainer(self, kind, item): if kind in [ self.SCALE, self.ENTRY, self.SPIN, self.OPTION, self.LABEL] and item.inContainer: return True else: return False def hideWidget(self, kind, name): # get the dictionary of items, and find the item in it items = self.__getItems(kind) item = self.__verifyItem(items, name) if self.__widgetHasContainer(kind, item): widget = item.master self.n_frameLabs[name].hidden = True else: if kind in [self.RB, self.RADIOBUTTON]: for rb in item: if rb.text == name: widget = rb widget = item if "in" in widget.grid_info(): widget.grid_remove() # self.__updateLabelBoxes(name) def showWidget(self, kind, name): # get the dictionary of items, and find the item in it items = self.__getItems(kind) item = self.__verifyItem(items, name) if self.__widgetHasContainer(kind, item): widget = item.master self.n_frameLabs[name].hidden = False else: widget = item # only show the widget, if it's not 
already showing if "in" not in widget.grid_info(): widget.grid() # self.__updateLabelBoxes(name) def removeWidget(self, kind, name): # get the dictionary of items, and find the item in it items = self.__getItems(kind) item = self.__verifyItem(items, name) # if it's a flasher, remove it if item in self.n_flashLabs: self.n_flashLabs.remove(item) if len(self.n_flashLabs) == 0: self.doFlash = False # animated images... if self.__widgetHasContainer(kind, item): # destroy the parent parent = item.master parent.grid_forget() parent.destroy() # remove frame, label & widget from lists self.n_labels.pop(name) self.n_frameLabs.pop(name) self.n_frames.remove(parent) else: item.grid_forget() item.destroy() # finally remove it from the dictionary items.pop(name) def removeAllWidgets(self): for child in self.containerStack[0]['container'].winfo_children(): child.destroy() self.__configBg(self.containerStack[0]['container']) self.__initArrays() self.setGeom(None) ##################################### # FUNCTION for managing commands ##################################### # funcion to wrap up lambda # if the thing calling this generates parameters - then set discard=True @staticmethod def MAKE_FUNC(funcName, param, discard=False): if discard: return lambda *args: funcName(param) else: return lambda: funcName(param) def __checkFunc(self, names, funcs): singleFunc = None if funcs is None: return None elif callable(funcs): singleFunc = funcs elif len(names) != len(funcs): raise Exception("List sizes don't match") return singleFunc ##################################### # FUNCTION to position a widget ##################################### # checks if the item already exists def __verifyItem(self, items, item, newItem=False): if not newItem and item not in items: raise ItemLookupError("Invalid key: " + item + " does not exist") elif not newItem and item in items: return items[item] elif newItem and item in items: raise ItemLookupError( "Duplicate key: '" + item + "' already exists") def getRow(self): return self.containerStack[-1]['emptyRow'] def gr(self): return self.getRow() def __repackWidget(self, widget, params): if widget.winfo_manager() == "grid": ginfo = widget.grid_info() ginfo.update(params) widget.grid(ginfo) elif widget.winfo_manager() == "pack": pinfo = widget.pack_info() pinfo.update(params) widget.pack(pinfo) else: raise Exception( "Unknown geometry manager: " + widget.winfo_manager()) # convenience function to set RCS, referencing the current container's # settings def __getRCS(self, row, column, colspan, rowspan): if row is None: row = self.containerStack[-1]['emptyRow'] self.containerStack[-1]['emptyRow'] = row + 1 if column >= self.containerStack[-1]['colCount']: self.containerStack[-1]['colCount'] = column + 1 # if column == 0 and colspan == 0 and self.containerStack[-1]['colCount'] > 1: # colspan = self.containerStack[-1]['colCount'] return row, column, colspan, rowspan def SET_WIDGET_FG(self, widget, fg, external=False): widgType = widget.__class__.__name__ if self.__isWidgetContainer(widget): self.containerStack[-1]['fg'] = fg elif widgType == "Link" and not external: pass else: try: widget.config(foreground=fg) except: pass # can't set an FG colour on this widget @staticmethod def TINT(widget, colour): col = [] for a, b in enumerate(widget.winfo_rgb(colour)): t = int(min(max(0, b / 256 + (255 - b / 256) * .3), 255)) t = str(hex(t))[2:] if len(t) == 1: t = '0' + t elif len(t) == 0: t = '00' col.append(t) return "#" + "".join(col) # convenience method to set a widget's bg @staticmethod def 
SET_WIDGET_BG(widget, bg, external=False): # POTENTIAL ISSUES # spinBox - highlightBackground # cbs/rbs - activebackground # grids - background if bg is None: return # ignore empty colours # , "Scale"]#, "Button", "OptionMenu"] darwinBorders = [ "Text", "ScrolledText", "Entry", "AutoCompleteEntry", "Button"] linuxBorders = darwinBorders + ["Radiobutton", "Checkbutton"] noBg = [ "Button", "Spinbox", "ListBox", "SplitMeter", "DualMeter", "Meter", "ToggleFrame", "OptionMenu"] # , "Scale"] widgType = widget.__class__.__name__ isDarwin = gui.GET_PLATFORM() == gui.MAC isLinux = gui.GET_PLATFORM() == gui.LINUX # always remove the border from scales if widgType == "Scale": widget.config(highlightbackground=bg) # tint the background colour when active... if widgType in ["Button", "OptionMenu", "Scale"]: widget.config(activebackground=gui.TINT(widget, bg)) # Mac specific colours if widgType in darwinBorders: if isDarwin: widget.config(highlightbackground=bg) # if widgType == "OptionMenu": widget.config(background=bg) if external or widgType == "Scale": widget.config(bg=bg) # Linux specific colours if widgType in linuxBorders: if isLinux: widget.config(highlightbackground=bg) if external: widget.config(bg=bg) # widget with label, in frame elif widgType == "LabelBox": widget.theLabel.config(bg=bg) gui.SET_WIDGET_BG(widget.theWidget, bg) # group of buttons or labels elif widgType == "WidgetBox": widget.config(bg=bg) for widg in widget.theWidgets: gui.SET_WIDGET_BG(widg, bg) elif widgType in ["LabelFrame", "PanedFrame", "Pane", "ajFrame"]: widget.config(bg=bg) for child in widget.winfo_children(): gui.SET_WIDGET_BG(child, bg) # any other widgets elif external: if gui.GET_PLATFORM() == gui.MAC: if widgType not in ["OptionMenu"]: widget.config(bg=bg) else: widget.config(bg=bg) elif widgType not in noBg: widget.config(bg=bg) def __getContainerBg(self): return self.__getContainer()["bg"] def __getContainerFg(self): try: return self.__getContainer()["fg"] except: return "black" # two important things here: # grid - sticky: position of widget in its space (side or fill) # row/columns configure - weight: how to grow with GUI def __positionWidget( self, widget, row, column=0, colspan=0, rowspan=0, sticky=W + E): # allow item to be added to container container = self.__getContainer() gui.SET_WIDGET_BG(widget, self.__getContainerBg()) self.SET_WIDGET_FG(widget, self.__getContainerFg()) # alpha paned window placement if self.containerStack[-1]['type'] == self.C_PANEDFRAME: container.add(widget) self.containerStack[-1]['widgets'] = True return # else, add to grid row, column, colspan, rowspan = self.__getRCS( row, column, colspan, rowspan) # build a dictionary for the named params iX = self.containerStack[-1]['ipadx'] iY = self.containerStack[-1]['ipady'] cX = self.containerStack[-1]['padx'] cY = self.containerStack[-1]['pady'] params = { "row": row, "column": column, "ipadx": iX, "ipady": iY, "padx": cX, "pady": cY} # if we have a column span, apply it if colspan != 0: params["columnspan"] = colspan # if we have a rowspan, apply it if rowspan != 0: params["rowspan"] = rowspan # 1) if param has sticky, use that # 2) if container has sticky - override # 3) else, none if self.containerStack[-1]["sticky"] is not None: params["sticky"] = self.containerStack[-1]["sticky"] elif sticky is not None: params["sticky"] = sticky else: pass # make colspanned widgets expand to fill height of cell if rowspan != 0: if "sticky" in params: if "n" not in params["sticky"]: params["sticky"] += "n" if "s" not in params["sticky"]: 
params["sticky"] += "s" else: params["sticky"] = "ns" # expand that dictionary out as we pass it as a value widget.grid(**params) self.containerStack[-1]['widgets'] = True # if we're in a PANEDFRAME - we need to set parent... if self.containerStack[-1]['type'] == self.C_PANE: self.containerStack[-2]['widgets'] = True # configure the row/column to expand equally if self.containerStack[-1]['expand'] in ["ALL", "COLUMN"]: Grid.columnconfigure(container, column, weight=1) else: Grid.columnconfigure(container, column, weight=0) if self.containerStack[-1]['expand'] in ["ALL", "ROW"]: Grid.rowconfigure(container, row, weight=1) else: Grid.rowconfigure(container, row, weight=0) # self.containerStack[-1]['container'].columnconfigure(0, weight=1) # self.containerStack[-1]['container'].rowconfigure(0, weight=1) ##################################### # FUNCTION to manage containers ##################################### # adds the container to the container stack - makes this the current # working container def __addContainer(self, cTitle, cType, container, row, col, sticky=None): containerData = {'type': cType, 'title': cTitle, 'container': container, 'emptyRow': row, 'colCount': col, 'sticky': sticky, 'padx': 0, 'pady': 0, 'ipadx': 0, 'ipady': 0, 'expand': "ALL", 'widgets': False, "fg": "black"} self.containerStack.append(containerData) def openRootPage(self, title): self.__openContainer(self.C_ROOT, title) def openLabelFrame(self, title): self.__openContainer(self.C_LABELFRAME, title) def openFrame(self, title): self.__openContainer(self.C_FRAME, title) def openToggleFrame(self, title): self.__openContainer(self.C_TOGGLEFRAME, title) def openPagedWindow(self, title): self.__openContainer(self.C_PAGEDWINDOW, title) def openPage(self, windowTitle, pageNumber): self.__openContainer(self.C_PAGE, windowTitle+"__"+str(pageNumber)) def openTabbedFrame(self, title): self.__openContainer(self.C_TABBEDFRAME, title) def openTab(self, frameTitle, tabTitle): self.__openContainer(self.C_TAB, frameTitle+"__"+tabTitle) def openPanedFrame(self, title): self.__openContainer(self.C_PANEDFRAME, title) def openPane(self, title): self.__openContainer(self.C_PANE, title) def openSubWindow(self, title): self.__openContainer(self.C_SUBWINDOW, title) def openScrollPane(self, title): self.__openContainer(self.C_SCROLLPANE, title) # function to reload the specified container def __openContainer(self, kind, title): # get the cached container config for this container cName = kind + "__" + title try: cConf = self.n_usedContainers[cName] except KeyError: raise Exception("Attempted to open invalid " + kind + ": " + str(title)) self.containerStack.append(cConf) # returns the current working container def __getContainer(self): container = self.containerStack[-1]['container'] if self.containerStack[-1]['type'] == self.C_SCROLLPANE: return container.interior elif self.containerStack[-1]['type'] == self.C_PAGEDWINDOW: return container.getPage() elif self.containerStack[-1]['type'] == self.C_TOGGLEFRAME: return container.getContainer() else: return container # if possible, removes the current container def __removeContainer(self): if len(self.containerStack) == 1: raise Exception("Can't remove container, already in root window.") else: container = self.containerStack.pop() if not container['widgets']: self.warn("Closing empty container: " + container['title']) # raise Exception("Put something in the container, before removing it.") # store the container so that it can be re-opened later name = container["type"] + "__" + 
container["title"] self.n_usedContainers[name] = container return container # functions to start the various containers def startContainer( self, fType, title, row=None, column=0, colspan=0, rowspan=0, sticky=None): if fType == self.C_LABELFRAME: # first, make a LabelFrame, and position it correctly self.__verifyItem(self.n_labelFrames, title, True) container = LabelFrame( self.containerStack[-1]['container'], text=title) container.isContainer = True container.config( background=self.__getContainerBg(), font=self.labelFrameFont, relief="groove") self.setPadX(5) self.setPadY(5) self.__positionWidget( container, row, column, colspan, rowspan, "nsew") self.n_labelFrames[title] = container # now, add to top of stack self.__addContainer(title, self.C_LABELFRAME, container, 0, 1, sticky) elif fType == self.C_FRAME: # first, make a Frame, and position it correctly self.__verifyItem(self.n_ajFrame, title, True) container = ajFrame(self.containerStack[-1]['container']) container.isContainer = True # container.config(background=self.__getContainerBg(), font=self.frameFont, relief="groove") container.config(background=self.__getContainerBg()) self.__positionWidget( container, row, column, colspan, rowspan, "nsew") self.n_ajFrame[title] = container # now, add to top of stack self.__addContainer(title, self.C_FRAME, container, 0, 1, sticky) elif fType == self.C_TABBEDFRAME: self.__verifyItem(self.n_tabbedFrames, title, True) tabbedFrame = TabbedFrame( self.containerStack[-1]['container'], bg=self.__getContainerBg()) # tabbedFrame.isContainer = True self.__positionWidget( tabbedFrame, row, column, colspan, rowspan, sticky=sticky) self.n_tabbedFrames[title] = tabbedFrame # now, add to top of stack self.__addContainer(title, self.C_TABBEDFRAME, tabbedFrame, 0, 1, sticky) elif fType == self.C_TAB: # add to top of stack self.containerStack[-1]['widgets'] = True tabTitle = self.containerStack[-1]['title'] + "__" + title self.__addContainer(tabTitle, self.C_TAB, self.containerStack[-1]['container'].addTab(title), 0, 1, sticky) elif fType == self.C_PANEDFRAME: # if we previously put a frame for widgets # remove it if self.containerStack[-1]['type'] == self.C_PANE: self.stopContainer() # now, add the new pane self.__verifyItem(self.n_panedFrames, title, True) pane = PanedWindow( self.containerStack[ -1]['container'], showhandle=True, sashrelief="groove", bg=self.__getContainerBg()) pane.isContainer = True self.__positionWidget( pane, row, column, colspan, rowspan, sticky=sticky) self.n_panedFrames[title] = pane # now, add to top of stack self.__addContainer(title, self.C_PANEDFRAME, pane, 0, 1, sticky) # now, add a frame to the pane self.startContainer(self.C_PANE, title) elif fType == self.C_PANE: # create a frame, and add it to the pane pane = Pane( self.containerStack[-1]['container'], bg=self.__getContainerBg()) pane.isContainer = True self.containerStack[-1]['container'].add(pane) self.n_panes[title] = pane # now, add to top of stack self.__addContainer(title, self.C_PANE, pane, 0, 1, sticky) elif fType == self.C_SCROLLPANE: scrollPane = ScrollPane( self.containerStack[-1]['container'], bg=self.__getContainerBg())#, width=100, height=100) scrollPane.isContainer = True # self.containerStack[-1]['container'].add(scrollPane) self.__positionWidget( scrollPane, row, column, colspan, rowspan, sticky=sticky) self.n_scrollPanes[title] = scrollPane # now, add to top of stack self.__addContainer(title, self.C_SCROLLPANE, scrollPane, 0, 1, sticky) elif fType == self.C_TOGGLEFRAME: toggleFrame = ToggleFrame( 
self.containerStack[-1]['container'], title=title, bg=self.__getContainerBg()) toggleFrame.configure(font=self.toggleFrameFont) toggleFrame.isContainer = True self.__positionWidget( toggleFrame, row, column, colspan, rowspan, sticky=sticky) self.__addContainer(title, self.C_TOGGLEFRAME, toggleFrame, 0, 1, "nw") self.n_toggleFrames[title] = toggleFrame elif fType == self.C_PAGEDWINDOW: # create the paged window pagedWindow = PagedWindow( self.containerStack[ -1]['container'], title=title, bg=self.__getContainerBg(), width=200, height=400) # bind events self.topLevel.bind("<Left>", pagedWindow.showPrev) self.topLevel.bind("<Control-Left>", pagedWindow.showFirst) self.topLevel.bind("<Right>", pagedWindow.showNext) self.topLevel.bind("<Control-Right>", pagedWindow.showLast) # register it as a container pagedWindow.isContainer = True self.__positionWidget( pagedWindow, row, column, colspan, rowspan, sticky=sticky) self.__addContainer(title, self.C_PAGEDWINDOW, pagedWindow, 0, 1, "nw") self.n_pagedWindows[title] = pagedWindow elif fType == self.C_PAGE: page = self.containerStack[-1]['container'].addPage() page.isContainer = True self.__addContainer(title, self.C_PAGE, page, 0, 1, sticky) self.containerStack[-1]['expand'] = "None" else: raise Exception("Unknown container: " + fType) ####### Tabbed Frames ######## def startTabbedFrame( self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"): self.startContainer( self.C_TABBEDFRAME, title, row, column, colspan, rowspan, sticky) def stopTabbedFrame(self): # auto close the existing TAB - keep it? if self.containerStack[-1]['type'] == self.C_TAB: self.warn("You didn't STOP the previous TAB") self.stopContainer() self.stopContainer() def setTabbedFrameTabExpand(self, title, expand=True): nb = self.__verifyItem(self.n_tabbedFrames, title) nb.expandTabs(expand) def setTabbedFrameSelectedTab(self, title, tab): nb = self.__verifyItem(self.n_tabbedFrames, title) nb.changeTab(tab) def setTabbedFrameDisabledTab(self, title, tab, disabled=True): nb = self.__verifyItem(self.n_tabbedFrames, title) nb.disableTab(tab, disabled) def setTabbedFrameDisableAllTabs(self, title, disabled=True): nb = self.__verifyItem(self.n_tabbedFrames, title) nb.disableAllTabs(disabled) def setTabBg(self, title, tab, colour): nb = self.__verifyItem(self.n_tabbedFrames, title) tab = nb.getTab(tab) gui.SET_WIDGET_BG(tab, colour) # tab.config(bg=colour) #gui.SET_WIDGET_BG(tab, colour) for child in tab.winfo_children(): gui.SET_WIDGET_BG(child, colour) def startTab(self, title): # auto close the previous TAB - keep it? 
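        # (if the previous tab was not stopped, the check below closes it)
        #
        # Typical tab-building sequence - an illustrative sketch only, kept as
        # comments (assumes "app" is a gui() instance and that addLabel(),
        # defined elsewhere in this class, is available):
        #
        #   app.startTabbedFrame("TabbedFrame")
        #   app.startTab("Tab1")
        #   app.addLabel("t1", "Contents of tab one")
        #   app.stopTab()
        #   app.startTab("Tab2")
        #   app.addLabel("t2", "Contents of tab two")
        #   app.stopTab()
        #   app.stopTabbedFrame()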
if self.containerStack[-1]['type'] == self.C_TAB: self.warn("You didn't STOP the previous TAB") self.stopContainer() elif self.containerStack[-1]['type'] != self.C_TABBEDFRAME: raise Exception( "Can't add a Tab to the current container: ", self.containerStack[-1]['type']) self.startContainer(self.C_TAB, title) def getTabbedFrameSelectedTab(self, title): nb = self.__verifyItem(self.n_tabbedFrames, title) return nb.getSelectedTab() def stopTab(self): if self.containerStack[-1]['type'] != self.C_TAB: raise Exception("Can't stop a TAB, currently in:", self.containerStack[-1]['type']) self.stopContainer() ###### END Tabbed Frames ######## ##################################### # FUNCTION for simple grids ##################################### def addGrid( self, title, data, row=None, column=0, colspan=0, rowspan=0, action=None, addRow=False): self.__verifyItem(self.n_grids, title, True) grid = SimpleGrid( self.__getContainer(), title, data, action, addRow, buttonFont=self.buttonFont) grid.config(font=self.gridFont, background=self.__getContainerBg()) self.__positionWidget( grid, row, column, colspan, rowspan, N + E + S + W) self.n_grids[title] = grid def getGridEntries(self, title): return self.__verifyItem(self.n_grids, title).getEntries() def getGridSelectedCells(self, title): return self.__verifyItem(self.n_grids, title).getSelectedCells() def addGridRow(self, title, data): self.__verifyItem(self.n_grids, title).addRow(data) ######################################## def startPanedFrame( self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"): self.startContainer( self.C_PANEDFRAME, title, row, column, colspan, rowspan, sticky) def startPanedFrameVertical( self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"): self.startPanedFrame(title, row, column, colspan, rowspan, sticky) self.setPanedFrameVertical(title) # sticky is alignment inside frame # frame will be added as other widgets def startLabelFrame( self, title, row=None, column=0, colspan=0, rowspan=0, sticky=W): self.startContainer( self.C_LABELFRAME, title, row, column, colspan, rowspan, sticky) ###### TOGGLE FRAMES ####### def startToggleFrame( self, title, row=None, column=0, colspan=0, rowspan=0): self.startContainer( self.C_TOGGLEFRAME, title, row, column, colspan, rowspan, sticky="new") def stopToggleFrame(self): if self.containerStack[-1]['type'] != self.C_TOGGLEFRAME: raise Exception("Can't stop a TOGGLEFRAME, currently in:", self.containerStack[-1]['type']) self.containerStack[-1]['container'].stop() self.stopContainer() def toggleToggleFrame(self, title): toggle = self.__verifyItem(self.n_toggleFrames, title) toggle.toggle() def disableToggleFrame(self, title, disabled=True): toggle = self.__verifyItem(self.n_toggleFrames, title) toggle.disable(disabled) def getToggleFrameState(self, title): toggle = self.__verifyItem(self.n_toggleFrames, title) return toggle.isShowing() ###### PAGED WINDOWS ####### def startPagedWindow( self, title, row=None, column=0, colspan=0, rowspan=0): self.startContainer( self.C_PAGEDWINDOW, title, row, column, colspan, rowspan, sticky="nsew") def setPagedWindowPage(self, title, page): pager = self.__verifyItem(self.n_pagedWindows, title) pager.showPage(page) def setPagedWindowButtonsTop(self, title, top=True): pager = self.__verifyItem(self.n_pagedWindows, title) pager.setNavPositionTop(top) def setPagedWindowButtons(self, title, buttons): pager = self.__verifyItem(self.n_pagedWindows, title) if not isinstance(buttons, list) or len(buttons) != 2: raise Exception( "You must 
provide a list of two strings fot setPagedWinowButtons()") pager.setPrevButton(buttons[0]) pager.setNextButton(buttons[1]) def setPagedWindowFunction(self, title, func): pager = self.__verifyItem(self.n_pagedWindows, title) command = self.MAKE_FUNC(func, title) pager.registerPageChangeEvent(command) def getPagedWindowPageNumber(self, title): pager = self.__verifyItem(self.n_pagedWindows, title) return pager.getPageNumber() def showPagedWindowPageNumber(self, title, show=True): pager = self.__verifyItem(self.n_pagedWindows, title) pager.showLabel(show) def showPagedWindowTitle(self, title, show=True): pager = self.__verifyItem(self.n_pagedWindows, title) pager.showTitle(show) def setPagedWindowTitle(self, title, pageTitle): pager = self.__verifyItem(self.n_pagedWindows, title) pager.setTitle(pageTitle) def startPage(self, row=None, column=0, colspan=0, rowspan=0, sticky="nw"): if self.containerStack[-1]['type'] == self.C_PAGE: self.warn("You didn't STOP the previous PAGE") self.stopPage() elif self.containerStack[-1]['type'] != self.C_PAGEDWINDOW: raise Exception("Can't start a PAGE, currently in:", self.containerStack[-1]['type']) self.containerStack[-1]['widgets'] = True # generate a page title pageNum = len(self.containerStack[-1]['container'].frames) + 1 pageTitle = self.containerStack[-1]['title'] + "__" + str(pageNum) self.startContainer( self.C_PAGE, pageTitle, row, column, colspan, rowspan, sticky=sticky) def stopPage(self): # get a handle on the page object page = self.containerStack[-1]['container'] if self.containerStack[-1]['type'] == self.C_PAGE: self.stopContainer() else: raise Exception("Can't stop PAGE, currently in:", self.containerStack[-1]['type']) # call the stopPage function on the paged window if self.containerStack[-1]['type'] == self.C_PAGEDWINDOW: self.containerStack[-1]['container'].stopPage() else: # we need to find the container and call stopPage page.container.stopPage() def stopPagedWindow(self): if self.containerStack[-1]['type'] == self.C_PAGE: self.warn("You didn't STOP the previous PAGE") self.containerStack[-1]['container'].stopPage() self.stopContainer() if self.containerStack[-1]['type'] != self.C_PAGEDWINDOW: raise Exception("Can't stop a PAGEDWINDOW, currently in:", self.containerStack[-1]['type']) self.stopContainer() ###### PAGED WINDOWS ####### def startScrollPane( self, title, row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"): self.startContainer( self.C_SCROLLPANE, title, row, column, colspan, rowspan, sticky) # functions to stop the various containers def stopContainer(self): self.__removeContainer() def stopFrame(self): if self.containerStack[-1]['type'] != self.C_FRAME: raise Exception("Can't stop a FRAME, currently in:", self.containerStack[-1]['type']) self.stopContainer() def stopLabelFrame(self): if self.containerStack[-1]['type'] != self.C_LABELFRAME: raise Exception("Can't stop a LABELFRAME, currently in:", self.containerStack[-1]['type']) self.stopContainer() def stopPanedFrame(self): if self.containerStack[-1]['type'] == self.C_PANE: self.stopContainer() if self.containerStack[-1]['type'] != self.C_PANEDFRAME: raise Exception("Can't stop a PANEDFRAME, currently in:", self.containerStack[-1]['type']) self.stopContainer() def stopScrollPane(self): if self.containerStack[-1]['type'] != self.C_SCROLLPANE: raise Exception("Can't stop a SCROLLPANE, currently in:", self.containerStack[-1]['type']) self.stopContainer() def stopAllPanedFrames(self): while True: try: self.stopPanedFrame() except: break def startFrame( self, title, 
row=None, column=0, colspan=0, rowspan=0, sticky="NSEW"): self.startContainer( self.C_FRAME, title, row, column, colspan, rowspan, sticky) ### SUB WINDOWS ### def startSubWindow(self, name, title=None, modal=False, grouped=False): self.__verifyItem(self.n_subWindows, name, True) if title is None: title = name top = SubWindow() top.modal = modal top.title(title) top.protocol( "WM_DELETE_WINDOW", self.MAKE_FUNC( self.hideSubWindow, name)) top.withdraw() top.win = self if not grouped: top.group(self.topLevel.group()) self.n_subWindows[name] = top # now, add to top of stack self.__addContainer(name, self.C_SUBWINDOW, top, 0, 1, "") def stopSubWindow(self): if self.containerStack[-1]['type'] == self.C_SUBWINDOW: self.stopContainer() else: raise Exception("Can't stop a SUBWINDOW, currently in:", self.containerStack[-1]['type']) # functions to show/hide/destroy SubWindows def showSubWindow(self, title): tl = self.__verifyItem(self.n_subWindows, title) tl.deiconify() tl.config(takefocus=True) tl.killLab = Label(tl) if tl.modal: tl.transient(self.topLevel) tl.grab_set() tl.focus_set() self.topLevel.wait_window(tl.killLab) def setSubWindowLocation(self, title, x, y): tl = self.__verifyItem(self.n_subWindows, title) tl.geometry("+%d+%d" % (x, y)) def hideSubWindow(self, title): tl = self.__verifyItem(self.n_subWindows, title) theFunc = tl.stopFunction if theFunc is None or theFunc(): tl.withdraw() if tl.modal: tl.killLab.destroy() self.topLevel.grab_set() self.topLevel.focus_set() def destroySubWindow(self, title): tl = self.__verifyItem(self.n_subWindows, title) theFunc = tl.stopFunction if theFunc is None or theFunc(): tl.withdraw() tl.killLab.destroy() tl.killLab = None self.topLevel.grab_set() self.topLevel.focus_set() tl.destroy() del self.n_subWindows[title] #### END SUB WINDOWS #### # make a PanedFrame align vertically def setPanedFrameVertical(self, window): pane = self.__verifyItem(self.n_panedFrames, window) pane.config(orient=VERTICAL) # function to set position of title for label frame def setLabelFrameAnchor(self, title, anchor): frame = self.__verifyItem(self.n_labelFrames, title) frame.config(labelanchor=anchor) ##################################### # warn when bad functions called... ##################################### def __getattr__(self, name): def handlerFunction(*args, **kwargs): self.warn( "Unknown function:" + name + " " + str(args) + " " + str(kwargs)) return handlerFunction def __setattr__(self, name, value): if self.built and not hasattr( self, name): # would this create a new attribute? raise AttributeError("Creating new attributes is not allowed!") if PYTHON2: object.__setattr__(self, name, value) else: super(gui, self).__setattr__(name, value) ##################################### # FUNCTION to add labels before a widget ##################################### # this will build a frame, with a label on the left hand side def __getLabelBox(self, title): self.__verifyItem(self.n_labels, title, True) # first, make a frame frame = LabelBox(self.__getContainer()) frame.config(background=self.__getContainerBg()) self.n_frames.append(frame) # if this is a big label, update the others to match... 
if len(title) > self.labWidth: self.labWidth = len(title) # loop through other labels and resize # for na in self.n_frameLabs: # self.n_frameLabs[na].config(width=self.labWidth) # next make the label lab = Label(frame) frame.theLabel = lab lab.hidden = False lab.inContainer = True lab.config( anchor=W, text=title, justify=LEFT, font=self.labelFont, background=self.__getContainerBg()) # lab.config( width=self.labWidth) lab.DEFAULT_TEXT = title self.n_labels[title] = lab self.n_frameLabs[title] = lab # now put the label in the frame lab.pack(side=LEFT, fill=Y) #lab.grid( row=0, column=0, sticky=W ) #Grid.columnconfigure(frame, 0, weight=1) #Grid.rowconfigure(frame, 0, weight=1) return frame # this is where we add the widget to the frame built above def __packLabelBox(self, frame, widget): widget.pack(side=LEFT, fill=BOTH, expand=True) widget.inContainer = True frame.theWidget = widget #widget.grid( row=0, column=1, sticky=W+E ) #Grid.columnconfigure(frame, 1, weight=1) #Grid.rowconfigure(frame, 0, weight=1) # function to resize labels, if they are hidden or shown def __updateLabelBoxes(self, title): if len(title) >= self.labWidth: self.labWidth = 0 # loop through other labels and resize for na in self.n_frameLabs: size = len(self.n_frameLabs[na].cget("text")) if not self.n_frameLabs[na].hidden and size > self.labWidth: self.labWidth = size for na in self.n_frameLabs: self.n_frameLabs[na].config(width=self.labWidth) ##################################### # FUNCTION for check boxes ##################################### def addCheckBox(self, title, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_cbs, title, True) var = IntVar(self.topLevel) cb = Checkbutton(self.__getContainer()) cb.config( text=title, variable=var, font=self.cbFont, background=self.__getContainerBg(), activebackground=self.__getContainerBg()) cb.DEFAULT_TEXT = title cb.config(anchor=W) cb.bind("<Button-1>", self.__grabFocus) self.n_cbs[title] = cb self.n_boxVars[title] = var self.__positionWidget(cb, row, column, colspan, rowspan, EW) def getCheckBox(self, title): bVar = self.__verifyItem(self.n_boxVars, title) if bVar.get() == 1: return True else: return False def setCheckBox(self, title, ticked=True, callFunction=True): cb = self.__verifyItem(self.n_cbs, title) if ticked: cb.select() else: cb.deselect() # now call function if callFunction: if hasattr(cb, 'cmd'): cb.cmd() ##################################### # FUNCTION for scales ##################################### def __buildScale(self, title, frame): self.__verifyItem(self.n_scales, title, True) scale = Scale(frame) scale.config( repeatinterval=10, digits=1, orient=HORIZONTAL, showvalue=False, highlightthickness=1) scale.inContainer = False self.n_scales[title] = scale scale.bind("<Button-1>", self.__grabFocus) return scale def addScale(self, title, row=None, column=0, colspan=0, rowspan=0): scale = self.__buildScale(title, self.__getContainer()) self.__positionWidget(scale, row, column, colspan, rowspan) def addLabelScale(self, title, row=None, column=0, colspan=0, rowspan=0): frame = self.__getLabelBox(title) scale = self.__buildScale(title, frame) self.__packLabelBox(frame, scale) self.__positionWidget(frame, row, column, colspan, rowspan) def getScale(self, title): sc = self.__verifyItem(self.n_scales, title) return sc.get() def setScale(self, title, pos, callFunction=True): sc = self.__verifyItem(self.n_scales, title) sc.set(pos) # now call function if callFunction: if hasattr(sc, 'cmd'): sc.cmd() def setScaleWidth(self, title, width): sc = 
self.__verifyItem(self.n_scales, title) sc.config(width=width) def setScaleLength(self, title, length): sc = self.__verifyItem(self.n_scales, title) sc.config(sliderlength=length) # this will make the scale show interval numbers # set to 0 to remove def showScaleIntervals(self, title, intervals): sc = self.__verifyItem(self.n_scales, title) sc.config(tickinterval=intervals) # this will make the scale show its value def showScaleValue(self, title, show=True): sc = self.__verifyItem(self.n_scales, title) sc.config(showvalue=show) # change the orientation (Hor or Vert) def orientScaleHor(self, title, hor=True): self.warn( ".orientScaleHor() is deprecated. Please use .setScaleHorizontal() or .setScaleVertical()") sc = self.__verifyItem(self.n_scales, title) if hor: sc.config(orient=HORIZONTAL) else: sc.config(orient=VERTICAL) def setScaleVertical(self, title): sc = self.__verifyItem(self.n_scales, title) sc.config(orient=VERTICAL) def setScaleHorizontal(self, title): sc = self.__verifyItem(self.n_scales, title) sc.config(orient=HORIZONTAL) def setScaleRange(self, title, start, end, curr=None): if curr is None: curr = start sc = self.__verifyItem(self.n_scales, title) sc.config(from_=start, to=end) self.setScale(title, curr) ##################################### # FUNCTION for optionMenus ##################################### def __buildOptionBox(self, frame, title, options, kind="normal"): self.__verifyItem(self.n_options, title, True) # create a string var to hold selected item var = StringVar(self.topLevel) self.n_optionVars[title] = var maxSize, options = self.__configOptionBoxList(title, options, kind) if len(options) > 0 and kind == "normal": option = OptionMenu(frame, var, *options) var.set(options[0]) option.kind = "normal" elif kind == "ticks": # http://stackoverflow.com/questions/29019760/how-to-create-a-combobox-that-includes-checkbox-for-each-item option = OptionMenu(frame, variable=var, value="") # delete the empty value we just added option['menu'].delete(0, 'end') var.set(title) vals = {} for o in options: vals[o] = BooleanVar() option['menu'].add_checkbutton( label=o, onvalue=True, offvalue=False, variable=vals[o]) self.n_optionVars[title] = vals option.kind = "ticks" else: option = OptionMenu(frame, var, []) option.kind = "normal" option.config( justify=LEFT, font=self.optionFont, background=self.__getContainerBg(), highlightthickness=1, width=maxSize, takefocus=1) option.bind("<Button-1>", self.__grabFocus) # compare on windows & mac #option.config(highlightthickness=12, bd=0, highlightbackground=self.__getContainerBg()) option.var = var option.maxSize = maxSize option.inContainer = False option.options = options option.DEFAULT_TEXT="" if options is not None: option.DEFAULT_TEXT='\n'.join(str(x) for x in options) # configure the drop-down too dropDown = option.nametowidget(option.menuname) dropDown.configure(font=self.optionFont) # dropDown.configure(background=self.__getContainerBg()) # if self.platform == self.MAC: # option.config(highlightbackground=self.__getContainerBg()) option.bind("<Tab>", self.__focusNextWindow) option.bind("<Shift-Tab>", self.__focusLastWindow) # add a right click menu self.__addRightClickMenu(option) self.__disableOptionBoxSeparators(option) # add to array list self.n_options[title] = option return option def addOptionBox( self, title, options, row=None, column=0, colspan=0, rowspan=0): option = self.__buildOptionBox(self.__getContainer(), title, options) self.__positionWidget(option, row, column, colspan, rowspan) def addTickOptionBox( self, title, 
options, row=None, column=0, colspan=0, rowspan=0): tick = self.__buildOptionBox( self.__getContainer(), title, options, "ticks") self.__positionWidget(tick, row, column, colspan, rowspan) def addLabelTickOptionBox( self, title, options, row=None, column=0, colspan=0, rowspan=0): frame = self.__getLabelBox(title) tick = self.__buildOptionBox(frame, title, options, "ticks") self.__packLabelBox(frame, tick) self.__positionWidget(frame, row, column, colspan, rowspan) def addLabelOptionBox( self, title, options, row=None, column=0, colspan=0, rowspan=0): frame = self.__getLabelBox(title) option = self.__buildOptionBox(frame, title, options) self.__packLabelBox(frame, option) self.__positionWidget(frame, row, column, colspan, rowspan) def getOptionBox(self, title): self.__verifyItem(self.n_optionVars, title) val = self.n_optionVars[title] if isinstance(val, dict): retVal = {} for k, v in val.items(): retVal[k] = bool(v.get()) return retVal else: val = val.get().strip() # set to None if it's a divider if val.startswith("-") or len(val) == 0: val = None return val def __disableOptionBoxSeparators(self, box): # disable any separators for pos, item in enumerate(box.options): if item.startswith("-"): box["menu"].entryconfigure(pos, state="disabled") def __configOptionBoxList(self, title, options, kind): # deal with a dict_keys object - messy!!!! if not isinstance(options, list): options = list(options) # make sure all options are strings options = [str(i) for i in options] # check for empty strings, replace first with message, remove rest found = False for pos, item in enumerate(options): if item == "": if not found: options[pos] = "- options -" found = True else: del options[pos] # get the longest string length try: maxSize = len(str(max(options, key=len))) except: try: maxSize = len(str(max(options))) except: maxSize = 0 # increase if ticks if kind == "ticks": if len(title) > maxSize: maxSize = len(title) # new bug?!? - doesn't fit anymore! if self.platform == self.MAC: maxSize += 3 return maxSize, options # function to replace the current contents of an option box # http://www.prasannatech.net/2009/06/tkinter-optionmenu-changing-choices.html def changeOptionBox(self, title, options, index=None): # get the optionBox & associated var box = self.__verifyItem(self.n_options, title) if box.kind == "ticks": self.warn("Unable to change TickOptionBoxes") return var = self.n_optionVars[title] # tidy up list and get max size maxSize, options = self.__configOptionBoxList(title, options, "normal") # warn if new options bigger if maxSize > box.maxSize: self.warn("The new options are wider then the old ones. 
" + str(maxSize) + ">" + str(box.maxSize)) # delete the current options box['menu'].delete(0, 'end') #var.set(" ") # add the new items for option in options: box["menu"].add_command( label=option, command=lambda temp=option: box.setvar( box.cget("textvariable"), value=temp)) box.options = options # disable any separators self.__disableOptionBoxSeparators(box) var.set(options[0]) # select the specified option self.setOptionBox(title, index) def deleteOptionBox(self, title, index): self.__verifyItem(self.n_optionVars, title) self.setOptionBox(title, index, None) # select the option at the specified position def setOptionBox(self, title, index, value=True): var = self.__verifyItem(self.n_optionVars, title) box = self.n_options[title] if box.kind == "ticks": if index in var: var[index].set(value) else: raise Exception("Unknown TickOptionBox: " + str(index) + " in: " + title) else: count = len(box.options) if count > 0: if index is None: index = 0 if not isinstance(index, int): try: index = box.options.index(index) except: self.warn("Invalid selection option: " + str(index)) return if index < 0 or index > count - 1: self.warn("Invalid selection index: " + str(index) + ". Should be between 0 and " + str(count - 1) + ".") else: # then we can delete it... if value is None: box['menu'].delete(index) del(box.options[index]) self.setOptionBox(title, 0) else: if not box['menu'].invoke(index): self.warn( "Invalid selection index: " + str(index) + " is a disabled index.") else: var.set("") self.warn("No items to select from: " + title) ##################################### # FUNCTION for matplotlib ##################################### def addPlot( self, title, t, s, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_plots, title, True) from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure fig = Figure() axes = fig.add_subplot(111) axes.plot(t,s) canvas = FigureCanvasTkAgg(fig, self.__getContainer()) canvas.fig = fig canvas.axes = axes canvas.show() # canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1) self.__positionWidget(canvas.get_tk_widget(), row, column, colspan, rowspan) self.n_plots[title] = canvas return axes def refreshPlot(self, title): canvas = self.__verifyItem(self.n_plots, title) canvas.draw() def updatePlot(self, title, t, s): axes = self.__verifyItem(self.n_plots, title).axes axes.clear() axes.plot(t, s) self.refreshPlot(title) ##################################### # FUNCTION to manage Properties Widgets ##################################### def addProperties( self, title, values=None, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_props, title, True) haveTitle = True if self.containerStack[-1]['type'] == self.C_TOGGLEFRAME: self.containerStack[-1]['sticky'] = "ew" haveTitle = False props = Properties( self.__getContainer(), title, values, haveTitle, font=self.propertiesFont, background=self.__getContainerBg()) self.__positionWidget(props, row, column, colspan, rowspan) self.n_props[title] = props def getProperties(self, title): props = self.__verifyItem(self.n_props, title) return props.getProperties() def getProperty(self, title, prop): props = self.__verifyItem(self.n_props, title) return props.getProperty(prop) def setProperty(self, title, prop, value=False): props = self.__verifyItem(self.n_props, title) props.addProperty(prop, value) def setProperties(self, title, props): p = self.__verifyItem(self.n_props, title) p.addProperties(props) 
def deleteProperty(self, title, prop): props = self.__verifyItem(self.n_props, title) props.addProperty(prop, None) ##################################### # FUNCTION to add spin boxes ##################################### def __buildSpinBox(self, frame, title, vals): self.__verifyItem(self.n_spins, title, True) if type(vals) not in [list, tuple]: raise Exception( "Can't create SpinBox " + title + ". Invalid values: " + str(vals)) spin = Spinbox(frame) spin.inContainer = False spin.isRange = False spin.config(font=self.entryFont, highlightthickness=0) # adds bg colour under spinners # if self.platform == self.MAC: # spin.config(highlightbackground=self.__getContainerBg()) spin.bind("<Tab>", self.__focusNextWindow) spin.bind("<Shift-Tab>", self.__focusLastWindow) # store the vals in DEFAULT_TEXT spin.DEFAULT_TEXT="" if vals is not None: spin.DEFAULT_TEXT='\n'.join(str(x) for x in vals) # make sure it's a list # reverse it, so the spin box functions properly vals = list(vals) vals.reverse() vals = tuple(vals) spin.config(values=vals) # prevent invalid entries if self.validateSpinBox is None: self.validateSpinBox = ( self.containerStack[0]['container'].register( self.__validateSpinBox), '%P', '%W') spin.config(validate='all', validatecommand=self.validateSpinBox) self.n_spins[title] = spin return spin def __addSpinBox( self, title, values, row=None, column=0, colspan=0, rowspan=0): spin = self.__buildSpinBox(self.__getContainer(), title, values) self.__positionWidget(spin, row, column, colspan, rowspan) self.setSpinBoxPos(title, 0) return spin def addSpinBox( self, title, values, row=None, column=0, colspan=0, rowspan=0): return self.__addSpinBox(title, values, row, column, colspan, rowspan) def addLabelSpinBox( self, title, values, row=None, column=0, colspan=0, rowspan=0): frame = self.__getLabelBox(title) spin = self.__buildSpinBox(frame, title, values) self.__packLabelBox(frame, spin) self.__positionWidget(frame, row, column, colspan, rowspan) self.setSpinBoxPos(title, 0) return spin def addSpinBoxRange( self, title, fromVal, toVal, row=None, column=0, colspan=0, rowspan=0): vals = list(range(fromVal, toVal + 1)) spin = self.__addSpinBox(title, vals, row, column, colspan, rowspan) spin.isRange = True def addLabelSpinBoxRange( self, title, fromVal, toVal, row=None, column=0, colspan=0, rowspan=0): vals = list(range(fromVal, toVal + 1)) spin = self.addLabelSpinBox(title, vals, row, column, colspan, rowspan) spin.isRange = True def getSpinBox(self, title): spin = self.__verifyItem(self.n_spins, title) return spin.get() # validates that an item in the named spinbox starts with the user_input def __validateSpinBox(self, user_input, widget_name): spin = self.containerStack[0]['container'].nametowidget(widget_name) vals = spin.cget("values") # .split() vals = self.__getSpinBoxValsAsList(vals) for i in vals: if i.startswith(user_input): return True self.containerStack[0]['container'].bell() return False # expects a valid spin box widget, and a valid value def __setSpinBoxVal(self, spin, val, callFunction=True): var = StringVar(self.topLevel) var.set(val) spin.config(textvariable=var) # now call function if callFunction: if hasattr(spin, 'cmd'): spin.cmd() # is it going to be a hash or list?? 
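    # Descriptive note: spin.cget("values") returns a Tcl-formatted string, e.g.
    # "1 2 3"; items containing spaces typically come back brace-wrapped, e.g.
    # "{a b} {c d}".  str.replace() returns a new string, so the two bare
    # replace() calls below leave vals unchanged - only the final split() shapes
    # the returned list.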
def __getSpinBoxValsAsList(self, vals): vals.replace("{", "") vals.replace("}", "") # if "{" in vals: # vals = vals[1:-1] # vals = vals.split("} {") # else: vals = vals.split() return vals def setSpinBox(self, title, value, callFunction=True): spin = self.__verifyItem(self.n_spins, title) vals = spin.cget("values") # .split() vals = self.__getSpinBoxValsAsList(vals) val = str(value) if val not in vals: raise Exception( "Invalid value: " + val + ". Not in SpinBox: " + title + "=" + str(vals)) self.__setSpinBoxVal(spin, val, callFunction) def setSpinBoxPos(self, title, pos, callFunction=True): spin = self.__verifyItem(self.n_spins, title) vals = spin.cget("values") # .split() vals = self.__getSpinBoxValsAsList(vals) pos = int(pos) if pos < 0 or pos >= len(vals): raise Exception( "Invalid position: " + str(pos) + ". No position in SpinBox: " + title + "=" + str(vals)) pos = len(vals) - 1 - pos val = vals[pos] self.__setSpinBoxVal(spin, val, callFunction) def changeSpinBox(self, title, vals): spin = self.__verifyItem(self.n_spins, title) if spin.isRange: self.warn("Can't convert " + title + " RangeSpinBox to SpinBox") else: vals = list(vals) vals.reverse() vals = tuple(vals) spin.config(values=vals) self.setSpinBoxPos(title, 0) ##################################### # FUNCTION to add images ##################################### # looks up label containing image def __animateImage(self, title, firstTime=False): try: lab = self.__verifyItem(self.n_images, title) except ItemLookupError: # image destroyed... try: del self.n_imageAnimationIds[title] except: pass return if not lab.image.animating: del self.n_imageAnimationIds[title] return if firstTime and lab.image.alreadyAnimated: return lab.image.alreadyAnimated = True try: if lab.image.cached: pic = lab.image.pics[lab.image.anim_pos] else: pic = PhotoImage(file=lab.image.path, format="gif - {}".format(lab.image.anim_pos)) lab.image.pics.append(pic) lab.image.anim_pos += 1 lab.config(image=pic) anim_id = self.topLevel.after( lab.image.anim_speed, self.__animateImage, title) self.n_imageAnimationIds[title] = anim_id except: lab.image.anim_pos = 0 lab.image.cached = True self.__animateImage(title) def __preloadAnimatedImage(self, img): if img.cached: return try: pic = PhotoImage(file=img.path, format="gif - {0}".format(img.anim_pos)) img.pics.append(pic) img.anim_pos += 1 self.preloadAnimatedImageId = self.topLevel.after( 0, self.__preloadAnimatedImage, img) # when all frames have been processed except TclError: img.anim_pos = 0 img.cached = True def __configAnimatedImage(self, img): img.alreadyAnimated = False img.isAnimated = True img.pics = [] img.cached = False img.anim_pos = 0 img.anim_speed = 150 img.animating = True # simple way to check if image is animated def __checkIsAnimated(self, name): if imghdr.what(name) == "gif": try: PhotoImage(file=name, format="gif - 1") return True except: pass return False def setAnimationSpeed(self, name, speed): img = self.__verifyItem(self.n_images, name).image if speed < 1: speed = 1 self.warn("Setting " + str(name) + " speed to 1. 
Minimum animation speed is 1.") img.anim_speed = speed def stopAnimation(self, name): img = self.__verifyItem(self.n_images, name).image img.animating = False def startAnimation(self, name): img = self.__verifyItem(self.n_images, name).image if not img.animating: img.animating = True anim_id = self.topLevel.after(img.anim_speed, self.__animateImage, name) self.n_imageAnimationIds[name] = anim_id def addAnimatedImage( self, name, imageFile, row=None, column=0, colspan=0, rowspan=0): self.warn("addAnimatedImage() is now deprecated - use addImage()") self.addImage(name, imageFile, row, column, colspan, rowspan) # function to set an alternative image, when a mouse goes over def setImageMouseOver(self, title, overImg): lab = self.__verifyItem(self.n_images, title) # first check over image & cache it fullPath = self.getImagePath(overImg) self.topLevel.after(0, self.__getImage, fullPath) leaveImg = lab.image.path lab.bind("<Leave>", lambda e: self.setImage(title, leaveImg)) lab.bind("<Enter>", lambda e: self.setImage(title, fullPath)) # function to set an image location def setImageLocation(self, location): if os.path.isdir(location): self.userImages = location else: raise Exception("Invalid image location: " + location) # get the full path of an image (including image folder) def getImagePath(self, imagePath): if imagePath is None: return None if self.userImages is not None: imagePath = os.path.join(self.userImages, imagePath) absPath = os.path.abspath(imagePath) return absPath # function to see if an image has changed def hasImageChanged(self, originalImage, newImage): newAbsImage = self.getImagePath(newImage) if originalImage is None: return True # filename has changed if originalImage.path != newAbsImage: return True # modification time has changed if originalImage.modTime != os.path.getmtime(newAbsImage): return True # no changes return False # function to remove image objects form cache def clearImageCache(self): self.n_imageCache = {} # internal function to build an image function from a string def __getImageData(self, imageData, fmt="gif"): if fmt=="png": if not TKINTERPNG_AVAILABLE: raise Exception( "TKINTERPNG library not found, PNG files not supported: " + imagePath) if sys.version_info >= (2, 7): self.warn( "Image processing for .PNGs is slow. .GIF is the recommended format") # png = PngImageTk(imagePath) # png.convert() # photo = png.image else: raise Exception("PNG images only supported in python 3: " + imagePath) else: imgObj = PhotoImage(data=imageData) imgObj.path = None imgObj.modTime = datetime.datetime.now() imgObj.isAnimated = False imgObj.animating = False return imgObj # internal function to check/build image object def __getImage(self, imagePath, checkCache=True, addToCache=True): if imagePath is None: return None # get the full image path imagePath = self.getImagePath(imagePath) # if we're caching, and we have a non-None entry in the cache - get it... 
photo = None if checkCache and imagePath in self.n_imageCache and self.n_imageCache[imagePath] is not None: photo = self.n_imageCache[imagePath] # if the image hasn't changed, use the cache if not self.hasImageChanged(photo, imagePath): pass # else load a new one elif os.path.isfile(imagePath): if os.access(imagePath, os.R_OK): imgType = imghdr.what(imagePath) if imgType is None: raise Exception( "Invalid file: " + imagePath + " is not a valid image") elif not imagePath.lower().endswith(imgType) and not ( imgType == "jpeg" and imagePath.lower().endswith("jpg")): # the image has been saved with the wrong extension raise Exception( "Invalid image extension: " + imagePath + " should be a ." + imgType) elif imagePath.lower().endswith('.gif'): photo = PhotoImage(file=imagePath) elif imagePath.lower().endswith('.ppm') or imagePath.lower().endswith('.pgm'): photo = PhotoImage(file=imagePath) elif imagePath.lower().endswith('jpg') or imagePath.lower().endswith('jpeg'): self.warn( "Image processing for .JPGs is slow. .GIF is the recommended format") photo = self.convertJpgToBmp(imagePath) elif imagePath.lower().endswith('.png'): # known issue here, some PNGs lack IDAT chunks # also, PNGs seem broken on python<3, maybe around the map # function used to generate pixel maps if not TKINTERPNG_AVAILABLE: raise Exception( "TKINTERPNG library not found, PNG files not supported: " + imagePath) if sys.version_info >= (2, 7): self.warn( "Image processing for .PNGs is slow. .GIF is the recommended format") png = PngImageTk(imagePath) png.convert() photo = png.image else: raise Exception("PNG images only supported in python 3: " + imagePath) else: raise Exception("Invalid image type: " + imagePath) else: raise Exception("Can't read image: " + imagePath) else: raise Exception("Image " + imagePath + " does not exist") # store the full poath to this image photo.path = imagePath # store the modification time photo.modTime = os.path.getmtime(imagePath) # sort out if it's an animated images if self.__checkIsAnimated(imagePath): self.__configAnimatedImage(photo) self.__preloadAnimatedImage(photo) else: photo.isAnimated = False photo.animating = False if addToCache: self.n_imageCache[imagePath] = photo return photo def getImageDimensions(self, name): img = self.__verifyItem(self.n_images, name).image return img.width(), img.height() # force replace the current image, with a new one def reloadImage(self, name, imageFile): label = self.__verifyItem(self.n_images, name) image = self.__getImage(imageFile, False) self.__populateImage(name, image) def reloadImageData(self, name, imageData): self.setImageData(name, imageData) def setImageData(self, name, imageData): label = self.__verifyItem(self.n_images, name) image = self.__getImageData(imageData) self.__populateImage(name, image) # replace the current image, with a new one def setImage(self, name, imageFile): label = self.__verifyItem(self.n_images, name) imageFile = self.getImagePath(imageFile) # only set the image if it's different if label.image.path == imageFile: self.warn("Not updating " + str(name) + ", " + str(imageFile) + " hasn't changed." 
) return elif imageFile is None: return else: image = self.__getImage(imageFile) self.__populateImage(name, image) # internal function to update the image in a label def __populateImage(self, name, image): label = self.__verifyItem(self.n_images, name) label.image.animating = False label.config(image=image) label.config( anchor=CENTER, font=self.labelFont, background=self.__getContainerBg()) label.image = image # keep a reference! if image.isAnimated: anim_id = self.topLevel.after( image.anim_speed + 100, self.__animateImage, name, True) self.n_imageAnimationIds[name] = anim_id # removed - keep the label the same size, and crop images #h = image.height() #w = image.width() #label.config(height=h, width=w) self.topLevel.update_idletasks() # load image from base-64 encoded GIF # use base64 module to convert binary data to base64 def addImageData(self, name, imageData, row=None, column=0, colspan=0, rowspan=0, fmt="gif"): self.__verifyItem(self.n_images, name, True) imgObj = self.__getImageData(imageData, fmt) self.__addImageObj(name, imgObj, row, column, colspan, rowspan) # must be GIF or PNG def addImage( self, name, imageFile, row=None, column=0, colspan=0, rowspan=0): #image = re.escape(image) self.__verifyItem(self.n_images, name, True) imgObj = self.__getImage(imageFile) self.__addImageObj(name, imgObj, row, column, colspan, rowspan) def __addImageObj(self, name, img, row=None, column=0, colspan=0, rowspan=0): label = Label(self.__getContainer()) label.config( anchor=CENTER, font=self.labelFont, background=self.__getContainerBg()) label.config(image=img) label.image = img # keep a reference! if img is not None: h = img.height() w = img.width() label.config(height=h, width=w) self.n_images[name] = label self.__positionWidget(label, row, column, colspan, rowspan) if img.isAnimated: anim_id = self.topLevel.after( img.anim_speed, self.__animateImage, name, True) self.n_imageAnimationIds[name] = anim_id def setImageSize(self, name, width, height): img = self.__verifyItem(self.n_images, name) img.config(height=height, width=width) # def rotateImage(self, name, image): # img = self.__verifyItem(self.n_images, name) # if +ve then grow, else shrink... def zoomImage(self, name, x, y=''): if x <= 0: self.shrinkImage(name, x * -1, y * -1) else: self.growImage(name, x, y) # get every nth pixel (must be an integer) # 0 will return an empty image, 1 will return the image, 2 will be 1/2 the # size ... def shrinkImage(self, name, x, y=''): img = self.__verifyItem(self.n_images, name) image = img.image.subsample(x, y) img.config(image=image) img.config( anchor=CENTER, font=self.labelFont, background=self.__getContainerBg()) img.modImage = image # keep a reference! img.config(width=image.width(), height=image.height()) # get every nth pixel (must be an integer) # 0 won't work, 1 will return the original size def growImage(self, name, x, y=''): label = self.__verifyItem(self.n_images, name) image = label.image.zoom(x, y) label.config(image=image) label.config( anchor=CENTER, font=self.labelFont, background=self.__getContainerBg()) label.modImage = image # keep a reference! 
label.config(width=image.width(), height=image.height()) def convertJpgToBmp(self, image): if not NANOJPEG_AVAILABLE: raise Exception( "nanojpeg library not found, unable to display jpeg files: " + image) elif sys.version_info < (2, 7): raise Exception( "JPG images only supported in python 2.7+: " + image) else: # read the image into an array of bytes with open(image, 'rb') as inFile: import array buf = array.array("B", inFile.read()) # init the translator, and decode the array of bytes nanojpeg.njInit() nanojpeg.njDecode(buf, len(buf)) # determine a file name & type if nanojpeg.njIsColor(): # fileName = image.split('.jpg', 1)[0] + '.ppm' param = 6 else: # fileName = image.split('.jpg', 1)[0] + '.pgm' # fileName = "test3.pgm" param = 5 # create a string, starting with the header val = "P%d\n%d %d\n255\n" % ( param, nanojpeg.njGetWidth(), nanojpeg.njGetHeight()) # append the bytes, converted to chars val += ''.join(map(chr, nanojpeg.njGetImage())) # release any stuff nanojpeg.njDone() photo = PhotoImage(data=val) return photo # write the chars to a new file, if python3 we need to encode them first # with open(fileName, "wb") as outFile: # if sys.version_info[0] == 2: outFile.write(val) # else: outFile.write(val.encode('ISO-8859-1')) # # return fileName # function to set a background image # make sure this is done before everything else, otherwise it will cover # other widgets def setBgImage(self, image): image = self.__getImage(image, False, False) # make sure it's not using the cache # self.containerStack[0]['container'].config(image=image) # window as a # label doesn't work... self.bgLabel.config(image=image) self.containerStack[0]['container'].image = image # keep a reference! def removeBgImage(self): self.bgLabel.config(image=None) # self.containerStack[0]['container'].config(image=None) # window as a # label doesn't work... 
# remove the reference - shouldn't be cached self.containerStack[0]['container'].image = None def resizeBgImage(self): if self.containerStack[0]['container'].image is None: return else: pass ##################################### # FUNCTION to play sounds ##################################### # function to set a sound location def setSoundLocation(self, location): if os.path.isdir(location): self.userSounds = location else: raise Exception("Invalid sound location: " + location) # internal function to manage sound availability def __soundWrap(self, sound, isFile=False, repeat=False, wait=False): if self.platform == self.WINDOWS: if self.userSounds is not None and sound is not None: sound = os.path.join(self.userSounds, sound) if isFile: if os.path.isfile(sound) is False: raise Exception("Can't find sound: " + sound) if not sound.lower().endswith('.wav'): raise Exception("Invalid sound format: " + sound) kind = winsound.SND_FILENAME if not wait: kind = kind | winsound.SND_ASYNC else: if sound is None: kind = winsound.SND_FILENAME else: kind = winsound.SND_ALIAS if not wait: kind = kind | winsound.SND_ASYNC if repeat: kind = kind | winsound.SND_LOOP winsound.PlaySound(sound, kind) else: # sound not available at this time raise Exception( "Sound not supported on this platform: " + platform()) def playSound(self, sound, wait=False): self.__soundWrap(sound, True, False, wait) def stopSound(self): self.__soundWrap(None) def loopSound(self, sound): self.__soundWrap(sound, True, True) def soundError(self): self.__soundWrap("SystemHand") def soundWarning(self): self.__soundWrap("SystemAsterisk") def playNote(self, note, duration=200): if self.platform == self.WINDOWS: try: if isinstance(note, str): freq = self.NOTES[note.lower()] else: freq = note except KeyError: raise Exception("Error: cannot play note - " + note) try: if isinstance(duration, str): length = self.DURATIONS[duration.upper()] else: length = duration except KeyError: raise Exception("Error: cannot play duration - " + duration) try: winsound.Beep(freq, length) except RuntimeError: raise Exception( "Sound not available on this platform: " + platform()) else: # sound not available at this time raise Exception( "Sound not supported on this platform: " + platform()) ##################################### # FUNCTION for radio buttons ##################################### def addRadioButton( self, title, name, row=None, column=0, colspan=0, rowspan=0): var = None newRb = False # title - is the grouper # so, if we already have an entry in n_rbVars - get it if (title in self.n_rbVars): var = self.n_rbVars[title] # also get the list of rbVals vals = self.n_rbVals[title] # and if we already have the new item in that list - reject it if name in vals: raise Exception( "Invalid radio button: " + name + " already exists") # otherwise - append it to the list of vals else: vals.append(name) else: # if this is a new grouper - set it all up var = StringVar(self.topLevel) vals = [name] self.n_rbVars[title] = var self.n_rbVals[title] = vals newRb = True # finally, create the actual RadioButton rb = Radiobutton(self.__getContainer()) rb.config( text=name, variable=var, value=name, background=self.__getContainerBg(), activebackground=self.__getContainerBg(), font=self.rbFont, indicatoron = 1) rb.config(anchor = W) rb.bind("<Button-1>", self.__grabFocus) rb.DEFAULT_TEXT = name # either append to existing widget list if (title in self.n_rbs): self.n_rbs[title].append(rb) # or create a new one else: self.n_rbs[title] = [rb] #rb.bind("<Tab>", 
self.__focusNextWindow) #rb.bind("<Shift-Tab>", self.__focusLastWindow) # and select it, if it's the first item in the list if newRb: rb.select() self.__positionWidget(rb, row, column, colspan, rowspan, EW) def getRadioButton(self, title): var = self.__verifyItem(self.n_rbVars, title) return var.get() def setRadioButton(self, title, value, callFunction=True): vals = self.__verifyItem(self.n_rbVals, title) if value not in vals: raise Exception( "Invalid radio button: '" + value + "' doesn't exist") var = self.n_rbVars[title] var.set(value) # now call function if callFunction: item = self.__verifyItem(self.n_rbs, title)[0] if hasattr(item, 'cmd'): item.cmd() def setRadioTick(self, title, tick=True): radios = self.__verifyItem(self.n_rbs, title) for rb in radios: if tick: rb.config(indicatoron=1) else: rb.config(indicatoron=0) ##################################### # FUNCTION for list box ##################################### def addListBox( self, name, values=None, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_lbs, name, True) frame = ListBox(self.__getContainer()) vscrollbar = AutoScrollbar(frame) hscrollbar = AutoScrollbar(frame, orient=HORIZONTAL) lb = Listbox( frame, yscrollcommand=vscrollbar.set, xscrollcommand=hscrollbar.set) vscrollbar.grid(row=0, column=1, sticky=N + S) hscrollbar.grid(row=1, column=0, sticky=E + W) lb.grid(row=0, column=0, sticky=N + S + E + W) frame.grid_rowconfigure(0, weight=1) frame.grid_columnconfigure(0, weight=1) vscrollbar.config(command=lb.yview) hscrollbar.config(command=lb.xview) lb.config(font=self.lbFont) self.n_lbs[name] = lb lb.DEFAULT_TEXT="" if values is not None: lb.DEFAULT_TEXT='\n'.join(str(x) for x in values) for name in values: lb.insert(END, name) self.__positionWidget(frame, row, column, colspan, rowspan) # set how many rows to display def setListBoxRows(self, name, rows): lb = self.__verifyItem(self.n_lbs, name) lb.config(height=rows) # make the list single/multi select # default is single def setListBoxMulti(self, title, multi=True): lb = self.__verifyItem(self.n_lbs, title) if multi: lb.config(selectmode=EXTENDED) else: lb.config(selectmode=BROWSE) # make the list single/multi select # default is single def setListBoxSingle(self, title, single=True): self.setListSingle(title, single) def setListSingle(self, title, single=True): self.setListBoxMulti(title, not single) # select the specified item in the list def selectListItem(self, title, item, callFunction=True): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) if len(items) > 0: for pos in range(len(items)): if items[pos] == item: self.selectListItemPos(title, pos) # now call function if callFunction: if hasattr(lb, 'cmd'): lb.cmd() break def selectListItemPos(self, title, pos): lb = self.__verifyItem(self.n_lbs, title) # sel = lb.curselection() lb.selection_clear(0, END) # show & select this item if pos >= 0: lb.see(pos) lb.activate(pos) lb.selection_set(pos) # replace the list items in the list box def updateListItems(self, title, items): self.clearListBox(title) self.addListItems(title, items) # add the items to the specified list box def addListItems(self, title, items): for i in items: self.addListItem(title, i) # add the item to the end of the list box def addListItem(self, title, item): lb = self.__verifyItem(self.n_lbs, title) # add it at the end lb.insert(END, item) # clear any selection items = lb.curselection() if len(items) > 0: lb.selection_clear(items) # show & select the newly added item self.selectListItemPos(title, lb.size() - 1) # 
returns a list containing 0 or more elements # all that are in the selected range def getListItems(self, title): lb = self.__verifyItem(self.n_lbs, title) items = lb.curselection() values = [] for loop in range(len(items)): values.append(lb.get(items[loop])) return values def getAllListItems(self, title): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) return list(items) def getListItemsPos(self, title): lb = self.__verifyItem(self.n_lbs, title) items = lb.curselection() return items def removeListItemAtPos(self, title, pos): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) if pos >= len(items): raise Exception("Invalid position: " + str(pos)) lb.delete(pos) # show & select this item if pos >= lb.size(): pos -= 1 self.selectListItemPos(title, pos) # remove a specific item from the listBox # will only remove the first item that matches the String def removeListItem(self, title, item): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) for pos, val in enumerate(items): if val == item: lb.delete(pos) break # show & select this item if pos >= lb.size(): pos -= 1 self.selectListItemPos(title, pos) # functions to config def setListItemAtPosBg(self, title, pos, col): lb = self.__verifyItem(self.n_lbs, title) lb.itemconfig(pos, bg=col) def setListItemAtPosFg(self, title, pos, col): lb = self.__verifyItem(self.n_lbs, title) lb.itemconfig(pos, fg=col) def setListItemBg(self, title, item, col): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) for pos, val in enumerate(items): if val == item: self.setListItemAtPosBg(title, pos, col) def setListItemFg(self, title, item, col): lb = self.__verifyItem(self.n_lbs, title) items = lb.get(0, END) for pos, val in enumerate(items): if val == item: self.setListItemAtPosFg(title, pos, col) def clearListBox(self, title): lb = self.__verifyItem(self.n_lbs, title) lb.delete(0, END) # clear ##################################### # FUNCTION for buttons ##################################### def __buildButton(self, title, func, frame, name=None): if name is None: name = title self.__verifyItem(self.n_buttons, title, True) but = Button(frame) but.config(text=name, font=self.buttonFont) but.DEFAULT_TEXT = name if func is not None: command = self.MAKE_FUNC(func, title) bindCommand = self.MAKE_FUNC(func, title, True) but.config(command=command) but.bind('<Return>', bindCommand) if self.platform in [self.MAC, self.LINUX]: but.config(highlightbackground=self.__getContainerBg()) #but.bind("<Tab>", self.__focusNextWindow) #but.bind("<Shift-Tab>", self.__focusLastWindow) self.n_buttons[title] = but return but def addNamedButton( self, name, title, func, row=None, column=0, colspan=0, rowspan=0): but = self.__buildButton(title, func, self.__getContainer(), name) self.__positionWidget(but, row, column, colspan, rowspan, None) def addButton(self, title, func, row=None, column=0, colspan=0, rowspan=0): but = self.__buildButton(title, func, self.__getContainer()) self.__positionWidget(but, row, column, colspan, rowspan, None) def setButton(self, name, text): but = self.__verifyItem(self.n_buttons, name) but.config(text=text) def setButtonImage(self, name, imgFile): but = self.__verifyItem(self.n_buttons, name) image = self.__getImage(imgFile) # works on Mac & Windows :) but.config(image=image, compound=TOP, text="", justify=LEFT) # but.config(image=image, compound=None, text="") # works on Windows, # not Mac but.image = image # adds a set of buttons, in the row, spannning specified columns # pass in a list of names & a 
list of functions (or a single function to # use for all) def addButtons( self, names, funcs, row=None, column=0, colspan=0, rowspan=0): if not isinstance(names, list): raise Exception( "Invalid button: " + names + ". It must be a list of buttons.") singleFunc = self.__checkFunc(names, funcs) frame = WidgetBox(self.__getContainer()) frame.config(background=self.__getContainerBg()) # make them into a 2D array, if not already if not isinstance(names[0], list): names = [names] # won't be used if single func if funcs is not None: funcs = [funcs] for bRow in range(len(names)): for i in range(len(names[bRow])): t = names[bRow][i] if funcs is None: tempFunc = None elif singleFunc is None: tempFunc = funcs[bRow][i] else: tempFunc = singleFunc but = self.__buildButton(t, tempFunc, frame) but.grid(row=bRow, column=i) Grid.columnconfigure(frame, i, weight=1) Grid.rowconfigure(frame, bRow, weight=1) frame.theWidgets.append(but) self.__positionWidget(frame, row, column, colspan, rowspan) self.n_frames.append(frame) ##################################### # FUNCTIONS for links ##################################### def __buildLink(self, title): link = Link(self.__getContainer()) link.config( text=title, font=self.linkFont, background=self.__getContainerBg()) self.n_links[title] = link return link # launches a browser to the specified page def addWebLink( self, title, page, row=None, column=0, colspan=0, rowspan=0): link = self.__buildLink(title) link.registerWebpage(page) self.__positionWidget(link, row, column, colspan, rowspan) # executes the specified function def addLink(self, title, func, row=None, column=0, colspan=0, rowspan=0): link = self.__buildLink(title) myF = self.MAKE_FUNC(func, title, True) link.registerCallback(myF) self.__positionWidget(link, row, column, colspan, rowspan) ##################################### # FUNCTIONS for grips ##################################### # adds a simple grip, used to drag the window around def addGrip(self, row=None, column=0, colspan=0, rowspan=0): grip = Grip(self.__getContainer()) self.__positionWidget(grip, row, column, colspan, rowspan) self.__addTooltip(grip, "Drag here to move", True) ##################################### # DatePicker Widget - using Form Container ##################################### def addDatePicker(self, name, row=None, column=0, colspan=0, rowspan=0): # initial DatePicker has these dates days = range(1, 32) self.MONTH_NAMES = calendar.month_name[1:] years = range(1970, 2021) # create a frame, and add the widgets self.startFrame(name, row, column, colspan, rowspan) self.setExpand("none") self.addLabel(name + "_DP_DayLabel", "Day:", 0, 0) self.setLabelAlign(name + "_DP_DayLabel", "w") self.addOptionBox(name + "_DP_DayOptionBox", days, 0, 1) self.addLabel(name + "_DP_MonthLabel", "Month:", 1, 0) self.setLabelAlign(name + "_DP_MonthLabel", "w") self.addOptionBox(name + "_DP_MonthOptionBox", self.MONTH_NAMES, 1, 1) self.addLabel(name + "_DP_YearLabel", "Year:", 2, 0) self.setLabelAlign(name + "_DP_YearLabel", "w") self.addOptionBox(name + "_DP_YearOptionBox", years, 2, 1) self.setOptionBoxFunction( name + "_DP_MonthOptionBox", self.__updateDatePickerDays) self.setOptionBoxFunction( name + "_DP_YearOptionBox", self.__updateDatePickerDays) self.stopFrame() # function to update DatePicker dropDowns def __updateDatePickerDays(self, title): if title.find("_DP_MonthOptionBox") > -1: title = title.split("_DP_MonthOptionBox")[0] elif title.find("_DP_YearOptionBox") > -1: title = title.split("_DP_YearOptionBox")[0] else: self.warn("Can't 
update days in DatePicker: " + title) return day = self.getOptionBox(title + "_DP_DayOptionBox") month = self.MONTH_NAMES.index( self.getOptionBox( title + "_DP_MonthOptionBox")) + 1 year = int(self.getOptionBox(title + "_DP_YearOptionBox")) days = range(1, calendar.monthrange(year, month)[1] + 1) self.changeOptionBox(title + "_DP_DayOptionBox", days) # keep previous day if possible myWarn = self.__pauseWarn() self.setOptionBox(title + "_DP_DayOptionBox", day) self.__resumeWarn(myWarn) # set a date for the named DatePicker def setDatePickerRange(self, title, startYear, endYear=None): if endYear is None: endYear = datetime.date.today().year years = range(startYear, endYear + 1) self.changeOptionBox(title + "_DP_YearOptionBox", years) def setDatePicker(self, title, date=None): if date is None: date = datetime.date.today() self.setOptionBox(title + "_DP_YearOptionBox", str(date.year)) self.setOptionBox(title + "_DP_MonthOptionBox", date.month - 1) self.setOptionBox(title + "_DP_DayOptionBox", date.day - 1) def getDatePicker(self, title): day = int(self.getOptionBox(title + "_DP_DayOptionBox")) month = self.MONTH_NAMES.index( self.getOptionBox( title + "_DP_MonthOptionBox")) + 1 year = int(self.getOptionBox(title + "_DP_YearOptionBox")) date = datetime.date(year, month, day) return date ##################################### # FUNCTIONS for labels ##################################### def __flash(self): if self.doFlash: for lab in self.n_flashLabs: bg = lab.cget("background") fg = lab.cget("foreground") lab.config(background=fg, foreground=bg) self.flashId = self.topLevel.after(250, self.__flash) def addFlashLabel( self, title, text=None, row=None, column=0, colspan=0, rowspan=0): self.addLabel(title, text, row, column, colspan, rowspan) self.n_flashLabs.append(self.n_labels[title]) self.doFlash = True def addLabel( self, title, text=None, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_labels, title, True) container = self.__getContainer() lab = Label(container) lab.inContainer = False if text is not None: lab.config(text=text) lab.DEFAULT_TEXT = text else: lab.DEFAULT_TEXT = "" lab.config( justify=LEFT, font=self.labelFont, background=self.__getContainerBg()) self.n_labels[title] = lab self.__positionWidget(lab, row, column, colspan, rowspan) def addEmptyLabel(self, title, row=None, column=0, colspan=0, rowspan=0): self.addLabel(title, None, row, column, colspan, rowspan) # adds a set of labels, in the row, spannning specified columns def addLabels(self, names, row=None, colspan=0, rowspan=0): frame = WidgetBox(self.__getContainer()) frame.config(background=self.__getContainerBg()) for i in range(len(names)): self.__verifyItem(self.n_labels, names[i], True) lab = Label(frame) lab.config( text=names[i], font=self.labelFont, justify=LEFT, background=self.__getContainerBg()) lab.inContainer = False self.n_labels[names[i]] = lab lab.grid(row=0, column=i) Grid.columnconfigure(frame, i, weight=1) Grid.rowconfigure(frame, 0, weight=1) frame.theWidgets.append(lab) self.__positionWidget(frame, row, 0, colspan, rowspan) self.n_frames.append(frame) def setLabel(self, name, text): lab = self.__verifyItem(self.n_labels, name) lab.config(text=text) def getLabel(self, name): lab = self.__verifyItem(self.n_labels, name) return lab.cget("text") def clearLabel(self, name): self.setLabel(name, "") ##################################### # FUNCTIONS to add Text Area ##################################### def __buildTextArea(self, title, frame, scrollable=False): 
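        # Builds either a scrolledtext.ScrolledText or a plain Text widget, wires
        # Tab/Shift-Tab focus traversal and the right-click menu, then calls
        # logTextArea() so textAreaChanged() can later compare the contents
        # against an md5 snapshot taken now.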
self.__verifyItem(self.n_textAreas, title, True) if scrollable: text = scrolledtext.ScrolledText(frame) else: text = Text(frame) text.config(font=self.taFont, width=20, height=10, undo=True) if self.platform in [self.MAC, self.LINUX]: text.config(highlightbackground=self.__getContainerBg()) text.bind("<Tab>", self.__focusNextWindow) text.bind("<Shift-Tab>", self.__focusLastWindow) # add a right click menu text.var = None self.__addRightClickMenu(text) self.n_textAreas[title] = text self.logTextArea(title) return text def addTextArea(self, title, row=None, column=0, colspan=0, rowspan=0): text = self.__buildTextArea(title, self.__getContainer()) self.__positionWidget( text, row, column, colspan, rowspan, N + E + S + W) def addScrolledTextArea( self, title, row=None, column=0, colspan=0, rowspan=0): text = self.__buildTextArea(title, self.__getContainer(), True) self.__positionWidget( text, row, column, colspan, rowspan, N + E + S + W) def getTextArea(self, title): self.__verifyItem(self.n_textAreas, title) text = self.n_textAreas[title].get('1.0', END + '-1c') return text def setTextArea(self, title, text): self.__verifyItem(self.n_textAreas, title) self.n_textAreas[title].insert('1.0', text) # functions to try to monitor text areas def clearTextArea(self, title): self.__verifyItem(self.n_textAreas, title) self.n_textAreas[title].delete('1.0', END) def logTextArea(self, title): newHash = self.__getTextAreaHash(title) self.n_taHashes[title] = newHash def textAreaChanged(self, title): newHash = self.__getTextAreaHash(title) return newHash != self.n_taHashes[title] def __getTextAreaHash(self, title): self.__verifyItem(self.n_textAreas, title) text = self.getTextArea(title) md5 = hashlib.md5(str.encode(text)).digest() return md5 ##################################### # FUNCTIONS to add Tree Widgets ##################################### def addTree(self, title, data, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_trees, title, True) frame = ScrollPane( self.__getContainer(), relief=RAISED, borderwidth=2, bg="white", highlightthickness=0, takefocus=1) self.__positionWidget(frame, row, column, colspan, rowspan, "NSEW") xmlDoc = parseString(data) item = ajTreeData(xmlDoc.documentElement) node = ajTreeNode(frame.getPane(), None, item) self.n_trees[title] = node # update() & expand() called in go() function def setTreeEditable(self, title, value=True): tree = self.__verifyItem(self.n_trees, title) tree.item.setCanEdit(value) def setTreeBg(self, title, colour): tree = self.__verifyItem(self.n_trees, title) tree.setBgColour(colour) def setTreeFg(self, title, colour): tree = self.__verifyItem(self.n_trees, title) tree.setFgColour(colour) def setTreeHighlightBg(self, title, colour): tree = self.__verifyItem(self.n_trees, title) tree.setBgHColour(colour) def setTreeHighlightFg(self, title, colour): tree = self.__verifyItem(self.n_trees, title) tree.setFgHColour(colour) def setTreeDoubleClickFunction(self, title, func): if func is not None: tree = self.__verifyItem(self.n_trees, title) command = self.MAKE_FUNC(func, title) tree.item.registerDblClick(command) def setTreeEditFunction(self, title, func): if func is not None: tree = self.__verifyItem(self.n_trees, title) command = self.MAKE_FUNC(func, title) tree.registerEditEvent(command) # get whole tree as XML def getTreeXML(self, title): tree = self.__verifyItem(self.n_trees, title) return tree.item.node.toxml() # get selected node as a string def getTreeSelected(self, title): tree = self.__verifyItem(self.n_trees, title) return 
tree.getSelectedText() # get selected node (and children) as XML def getTreeSelectedXML(self, title): tree = self.__verifyItem(self.n_trees, title) item = tree.getSelected() if item is not None: return item.node.toxml() else: return None ##################################### # FUNCTIONS to add Message Box ##################################### def addMessage( self, title, text, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_messages, title, True) mess = Message(self.__getContainer()) mess.config(font=self.messageFont) mess.config(justify=LEFT, background=self.__getContainerBg()) if text is not None: mess.config(text=text) mess.DEFAULT_TEXT = text else: mess.DEFAULT_TEXT = "" if self.platform in [self.MAC, self.LINUX]: mess.config(highlightbackground=self.__getContainerBg()) self.n_messages[title] = mess self.__positionWidget(mess, row, column, colspan, rowspan) # mess.bind("<Configure>", lambda e: mess.config(width=e.width-10)) def addEmptyMessage(self, title, row=None, column=0, colspan=0, rowspan=0): self.addMessage(title, None, row, column, colspan, rowspan) def setMessage(self, title, text): mess = self.__verifyItem(self.n_messages, title) mess.config(text=text) def clearMessage(self, title): self.setMessage(title, "") ##################################### # FUNCTIONS for entry boxes ##################################### def __buildEntry(self, title, frame, secret=False, words=[]): self.__verifyItem(self.n_entries, title, True) # if we are an autocompleter if len(words) > 0: ent = AutoCompleteEntry(words, frame) ent.config(font=self.entryFont) else: ent = Entry(frame) ent.var = StringVar(self.topLevel) ent.config(textvariable=ent.var, font=self.entryFont) ent.inContainer = False ent.showingDefault = False # current status of entry ent.default = "" # the default value to show (if set) ent.DEFAULT_TEXT = "" # the default value for language support ent.myTitle = title # thr title of the entry ent.isNumeric = False # if the entry is numeric # configure it to be secret if secret: ent.config(show="*") if self.platform in [self.MAC, self.LINUX]: ent.config(highlightbackground=self.__getContainerBg()) ent.bind("<Tab>", self.__focusNextWindow) ent.bind("<Shift-Tab>", self.__focusLastWindow) # add a right click menu self.__addRightClickMenu(ent) self.n_entries[title] = ent self.n_entryVars[title] = ent.var return ent def addEntry( self, title, row=None, column=0, colspan=0, rowspan=0, secret=False): ent = self.__buildEntry(title, self.__getContainer(), secret) self.__positionWidget(ent, row, column, colspan, rowspan) def addAutoEntry( self, title, words, row=None, column=0, colspan=0, rowspan=0): ent = self.__buildEntry( title, self.__getContainer(), secret=False, words=words) self.__positionWidget(ent, row, column, colspan, rowspan) def addLabelAutoEntry( self, title, words, row=None, column=0, colspan=0, rowspan=0, secret=False): frame = self.__getLabelBox(title) ent = self.__buildEntry(title, frame, secret, words=words) self.__packLabelBox(frame, ent) self.__positionWidget(frame, row, column, colspan, rowspan) def __validateNumericEntry( self, action, index, value_if_allowed, prior_value, text, validation_type, trigger_type, widget_name): if action == "1": if text in '0123456789.-+': try: if len(value_if_allowed) == 1 and value_if_allowed in '.-': return True elif len(value_if_allowed) == 2 and value_if_allowed == '-.': return True else: float(value_if_allowed) return True except ValueError: self.containerStack[0]['container'].bell() return False else: 
self.containerStack[0]['container'].bell() return False else: return True def addNumericEntry( self, title, row=None, column=0, colspan=0, rowspan=0, secret=False): ent = self.__buildEntry(title, self.__getContainer(), secret) self.__positionWidget(ent, row, column, colspan, rowspan) if self.validateNumeric is None: self.validateNumeric = (self.containerStack[0]['container'].register( self.__validateNumericEntry), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W') ent.isNumeric = True ent.config(validate='key', validatecommand=self.validateNumeric) self.setEntryTooltip(title, "Numeric data only.") def addLabelNumericEntry( self, title, row=None, column=0, colspan=0, rowspan=0, secret=False): self. addNumericLabelEntry( title, row, column, colspan, rowspan, secret) def addNumericLabelEntry( self, title, row=None, column=0, colspan=0, rowspan=0, secret=False): frame = self.__getLabelBox(title) ent = self.__buildEntry(title, frame, secret) self.__packLabelBox(frame, ent) self.__positionWidget(frame, row, column, colspan, rowspan) if self.validateNumeric is None: self.validateNumeric = (self.containerStack[0]['container'].register( self.__validateNumericEntry), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W') ent.isNumeric = True ent.config(validate='key', validatecommand=self.validateNumeric) self.setEntryTooltip(title, "Numeric data only.") def addSecretEntry(self, title, row=None, column=0, colspan=0, rowspan=0): self.addEntry(title, row, column, colspan, rowspan, True) def addLabelEntry( self, title, row=None, column=0, colspan=0, rowspan=0, secret=False): frame = self.__getLabelBox(title) ent = self.__buildEntry(title, frame, secret) self.__packLabelBox(frame, ent) self.__positionWidget(frame, row, column, colspan, rowspan) def addLabelSecretEntry( self, title, row=None, column=0, colspan=0, rowspan=0): self.addSecretLabelEntry(title, row, column, colspan, rowspan) def addSecretLabelEntry( self, title, row=None, column=0, colspan=0, rowspan=0): self.addLabelEntry(title, row, column, colspan, rowspan, True) def getEntry(self, name): self.__verifyItem(self.n_entryVars, name) entry = self.__verifyItem(self.n_entries, name) if entry.showingDefault: if entry.isNumeric: return 0 else: return "" else: val = self.n_entryVars[name].get() if entry.isNumeric: if len(val) == 0 or (len(val) == 1 and val in '.-') or (len(val) == 2 and val == "-."): return 0 else: return float(val) else: return val def setEntry(self, name, text): self.__verifyItem(self.n_entryVars, name) self.__updateEntryDefault(name, mode="set") self.n_entryVars[name].set(text) def __entryIn(self, name): self.__updateEntryDefault(name, "in") def __entryOut(self, name): self.__updateEntryDefault(name, "out") def __updateEntryDefault(self, name, mode=None): self.__verifyItem(self.n_entryVars, name) entry = self.__verifyItem(self.n_entries, name) # ignore this if no default to apply if entry.default == "": return current = self.n_entryVars[name].get() # clear & remove default if mode == "set" or (mode in [ "in", "clear"] and entry.showingDefault): self.n_entryVars[name].set("") entry.showingDefault = False entry.config(justify=entry.oldJustify, foreground=entry.oldFg) elif mode == "out" and current == "": self.n_entryVars[name].set(entry.default) entry.config(justify='center', foreground='grey') entry.showingDefault = True def updateDefaultText(self, name, text): self.__verifyItem(self.n_entryVars, name) entry = self.__verifyItem(self.n_entries, name) current = self.n_entryVars[name].get() if entry.showingDefault: 
self.n_entryVars[name].set(text) entry.default = text def setEntryDefault(self, name, text="default"): entry = self.__verifyItem(self.n_entries, name) self.__verifyItem(self.n_entryVars, name) # remember current settings - to return to entry.oldJustify = entry.cget('justify') entry.oldFg = entry.cget('foreground') entry.config(justify='center', foreground='grey') # show the new text self.n_entryVars[name].set(text) entry.showingDefault = True entry.default = text entry.DEFAULT_TEXT = text # bind commands to show/remove the default in_command = self.MAKE_FUNC(self.__entryIn, name, True) out_command = self.MAKE_FUNC(self.__entryOut, name, True) entry.bind("<FocusIn>", in_command, add="+") entry.bind("<FocusOut>", out_command, add="+") def clearEntry(self, name): self.__verifyItem(self.n_entryVars, name) self.n_entryVars[name].set("") self.__updateEntryDefault(name, mode="clear") self.setFocus(name) def clearAllEntries(self): for entry in self.n_entryVars: self.n_entryVars[entry].set("") self.__updateEntryDefault(entry, mode="clear") def setFocus(self, name): self.__verifyItem(self.n_entries, name) self.n_entries[name].focus_set() def __lookupValue(self, myDict, val): for name in myDict: if isinstance(myDict[name], type([])): # array of cbs for rb in myDict[name]: if rb == val: return name else: if myDict[name] == val: return name return None def __getWidgetName(self, widg): name = widg.__class__.__name__ if name.lower() == "tk": return self.__getTopLevel().title() elif name == "Listbox": return self.__lookupValue(self.n_lbs, widg) elif name == "Button": # merge together Buttons & Toolbar Buttons z = self.n_buttons.copy() z.update(self.n_tbButts) return self.__lookupValue(z, widg) elif name == "Entry": return self.__lookupValue(self.n_entries, widg) elif name == "Scale": return self.__lookupValue(self.n_scales, widg) elif name == "Checkbutton": return self.__lookupValue(self.n_cbs, widg) elif name == "Radiobutton": return self.__lookupValue(self.n_rbs, widg) elif name == "Spinbox": return self.__lookupValue(self.n_spins, widg) elif name == "OptionMenu": return self.__lookupValue(self.n_options, widg) elif name == "Text": return self.__lookupValue(self.n_textAreas, widg) elif name == "Link": return self.__lookupValue(self.n_links, widg) else: raise Exception("Unknown widget type: " + name) def getFocus(self): widg = self.topLevel.focus_get() return self.__getWidgetName(widg) ##################################### # FUNCTIONS for progress bars (meters) ##################################### def __addMeter( self, name, type="METER", row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_meters, name, True) if type == "SPLIT": meter = SplitMeter(self.__getContainer(), font=self.meterFont) elif type == "DUAL": meter = DualMeter(self.__getContainer(), font=self.meterFont) else: meter = Meter(self.__getContainer(), font=self.meterFont) self.n_meters[name] = meter self.__positionWidget(meter, row, column, colspan, rowspan) def addMeter(self, name, row=None, column=0, colspan=0, rowspan=0): self.__addMeter(name, "METER", row, column, colspan, rowspan) def addSplitMeter(self, name, row=None, column=0, colspan=0, rowspan=0): self.__addMeter(name, "SPLIT", row, column, colspan, rowspan) def addDualMeter(self, name, row=None, column=0, colspan=0, rowspan=0): self.__addMeter(name, "DUAL", row, column, colspan, rowspan) # update the value of the specified meter # note: expects a value between 0 (-100 for split/dual) & 100 def setMeter(self, name, value=0.0, text=None): item = 
self.__verifyItem(self.n_meters, name) item.set(value, text) def getMeter(self, name): item = self.__verifyItem(self.n_meters, name) return item.get() # a single colour for meters, a list of 2 colours for splits & duals def setMeterFill(self, name, colour): item = self.__verifyItem(self.n_meters, name) item.configure(fill=colour) ##################################### # FUNCTIONS for seperators ##################################### def addSeparator( self, row=None, column=0, colspan=0, rowspan=0, colour=None): self.warn( ".addSeparator() is deprecated. You should be using .addHorizontalSeparator() or .addVerticalSeparator()") self.addHorizontalSeparator(row, column, colspan, rowspan, colour) def addHorizontalSeparator( self, row=None, column=0, colspan=0, rowspan=0, colour=None): self.__addSeparator( "horizontal", row, column, colspan, rowspan, colour) def addVerticalSeparator( self, row=None, column=0, colspan=0, rowspan=0, colour=None): self.__addSeparator("vertical", row, column, colspan, rowspan, colour) def __addSeparator( self, orient, row=None, column=0, colspan=0, rowspan=0, colour=None): sep = Separator(self.__getContainer(), orient) if colour is not None: sep.configure(fg=colour) self.n_separators.append(sep) self.__positionWidget(sep, row, column, colspan, rowspan) ##################################### # FUNCTIONS for pie charts ##################################### def addPieChart( self, name, fracs, row=None, column=0, colspan=0, rowspan=0): self.__verifyItem(self.n_pieCharts, name, True) pie = PieChart(self.__getContainer(), fracs, self.__getContainerBg()) self.n_pieCharts[name] = pie self.__positionWidget(pie, row, column, colspan, rowspan, sticky=None) def setPieChart(self, title, name, value): pie = self.__verifyItem(self.n_pieCharts, title) pie.setValue(name, value) ##################################### # FUNCTIONS for tool bar ##################################### # adds a list of buttons along the top - like a tool bar... 
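    # Hedged usage sketch (illustrative only, not part of this file); "app", the
    # button names and the handler are assumptions:
    #     def tbPress(btn):
    #         print(btn)                     # the handler receives the button name
    #     app.addToolbar(["new", "open", "save"], tbPress, findIcon=True)
    #     app.setToolbarButtonDisabled("save")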
def addToolbar(self, names, funcs, findIcon=False): if not self.hasTb: self.hasTb = True image = None singleFunc = self.__checkFunc(names, funcs) if not isinstance(names, list): names = [names] for i in range(len(names)): t = names[i] if (t in self.n_tbButts): raise Exception( "Invalid toolbar button name: " + t + " already exists") if findIcon: # turn off warnings about PNGs myWarn = self.__pauseWarn() imgFile = os.path.join(self.icon_path, t.lower() + ".png") try: image = self.__getImage(imgFile) except Exception as e: image = None self.__resumeWarn(myWarn) but = Button(self.tb) self.n_tbButts[t] = but if singleFunc is not None: u = self.MAKE_FUNC(singleFunc, t) else: u = self.MAKE_FUNC(funcs[i], t) but.config(text=t, command=u, relief=FLAT, font=self.tbFont) if image is not None: but.image = image # works on Mac & Windows :) but.config(image=image, compound=TOP, text="", justify=LEFT) but.pack(side=LEFT, padx=2, pady=2) but.tt_var = self.__addTooltip(but, t.title(), True) def setToolbarIcon(self, name, icon): if (name not in self.n_tbButts): raise Exception("Unknown toolbar name: " + name) imgFile = os.path.join(self.icon_path, icon.lower() + ".png") myWarn = self.__pauseWarn() self.setToolbarImage(name, imgFile) self.__resumeWarn(myWarn) self.n_tbButts[name].tt_var.set(icon) def setToolbarImage(self, name, imgFile): if (name not in self.n_tbButts): raise Exception("Unknown toolbar name: " + name) image = self.__getImage(imgFile) self.n_tbButts[name].config(image=image) self.n_tbButts[name].image = image def setToolbarButtonEnabled(self, name): self.setToolbarButtonDisabled(name, False) def setToolbarButtonDisabled(self, name, disabled=True): if (name not in self.n_tbButts): raise Exception("Unknown toolbar name: " + name) if disabled: self.n_tbButts[name].config(state=DISABLED) else: self.n_tbButts[name].config(state=NORMAL) def setToolbarEnabled(self): self.setToolbarDisabled(False) def setToolbarDisabled(self, disabled=True): for but in self.n_tbButts.keys(): if disabled: self.n_tbButts[but].config(state=DISABLED) else: self.n_tbButts[but].config(state=NORMAL) # functions to hide & show the toolbar def hideToolbar(self): if self.hasTb: self.tb.pack_forget() def showToolbar(self): if self.hasTb: self.tb.pack(before=self.containerStack[0][ 'container'], side=TOP, fill=X) ##################################### # FUNCTIONS for menu bar ##################################### def __initMenu(self): # create a menu bar - only shows if populated if not self.hasMenu: # self.topLevel.option_add('*tearOff', FALSE) self.hasMenu = True self.menuBar = Menu(self.topLevel) if self.platform == self.MAC: appmenu = Menu(self.menuBar, name='apple') self.menuBar.add_cascade(menu=appmenu) self.n_menus["MAC_APP"] = appmenu elif self.platform == self.WINDOWS: # sysMenu must be added last, otherwise other menus vanish sysMenu = Menu(self.menuBar, name='system', tearoff=False) self.n_menus["WIN_SYS"] = sysMenu # add a parent menu, for menu items def createMenu(self, title, tearable=False, showInBar=True): self.__verifyItem(self.n_menus, title, True) self.__initMenu() if self.platform == self.MAC and tearable: self.warn("Tearable menus (" + title + ") not supported on MAC") tearable = False theMenu = Menu(self.menuBar, tearoff=tearable) if showInBar: self.menuBar.add_cascade(label=title, menu=theMenu) self.n_menus[title] = theMenu return theMenu def createRightClickMenu(self, title, showInBar=False): men = self.createMenu(title, False, showInBar) if gui.GET_PLATFORM() == gui.LINUX: self.addMenuSeparator(title) 
return men # add items to the named menu def addMenuItem( self, title, item, func=None, kind=None, shortcut=None, underline=-1, rb_id=None, createBinding=True): # set the initial menubar self.__initMenu() # get or create an initial menu if title is not None: try: theMenu = self.__verifyItem(self.n_menus, title, False) except: theMenu = self.createMenu(title) if underline > -1 and self.platform == self.MAC: self.warn("Underlining menu items not available on MAC") if func is not None: u = self.MAKE_FUNC(func, item, True) else: u = None a = b = None if shortcut is not None: # MODIFIERS=["Control", "Ctrl", "Option", "Opt", "Alt", "Shift", "Command", "Cmd", "Meta"] # UGLY formatting of accelerator & shortcut a = b = shortcut.lower().replace("+", "-") a = a.replace("control", "ctrl") a = a.replace("command", "cmd") a = a.replace("option", "opt") b = b.replace("ctrl", "Control") b = b.replace("control", "Control") b = b.replace("cmd", "Command") b = b.replace("command", "Command") b = b.replace("option", "Option") b = b.replace("opt", "Option") b = b.replace("alt", "Alt") b = b.replace("shift", "Shift") b = b.replace("meta", "Meta") if gui.GET_PLATFORM() != gui.MAC: a = a.replace("cmd", "ctrl") b = b.replace("Command", "Control") b = "<" + b + ">" a = a.title() self.__verifyItem(self.n_accelerators, a, True) self.n_accelerators.append(a) if u is not None and createBinding: self.topLevel.bind_all(b, u) if item == "-" or kind == "separator": theMenu.add_separator() elif kind == "topLevel" or title is None: if self.platform == self.MAC: self.warn( "Unable to make topLevel menus (" + item + ") on Mac") else: self.menuBar.add_command( label=item, command=u, accelerator=a, underline=underline) elif kind == "rb": varName = title + "rb" + item newRb = False if (varName in self.n_menuVars): var = self.n_menuVars[varName] else: newRb = True var = StringVar(self.topLevel) self.n_menuVars[varName] = var theMenu.add_radiobutton( label=rb_id, command=u, variable=var, value=rb_id, accelerator=a, underline=underline) if newRb: self.setMenuRadioButton(title, item, rb_id) elif kind == "cb": varName = title + "cb" + item self.__verifyItem(self.n_menuVars, varName, True) var = StringVar(self.topLevel) self.n_menuVars[varName] = var theMenu.add_checkbutton( label=item, command=u, variable=var, onvalue=1, offvalue=0, accelerator=a, underline=underline) elif kind == "sub": self.__verifyItem(self.n_menus, item, True) subMenu = Menu(theMenu, tearoff=False) self.n_menus[item] = subMenu theMenu.add_cascade(menu=subMenu, label=item) else: theMenu.add_command( label=item, command=u, accelerator=a, underline=underline) ################# # wrappers for other menu types def addMenuList(self, menuName, names, funcs): # deal with a dict_keys object - messy!!!! 
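        # names may arrive as a dict_keys view, so it is converted to a list
        # first; if the list contains "-" separator entries, one None is
        # appended to funcs per separator so that the name/function counts
        # line up for __checkFunc (separators themselves need no callback)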
if not isinstance(names, list): names = list(names) # append some Nones, if it's a list and contains separators if funcs is not None: if not callable(funcs): seps = names.count("-") for i in range(seps): funcs.append(None) singleFunc = self.__checkFunc(names, funcs) # add menu items for t in names: if funcs is None: u = None elif singleFunc is not None: u = singleFunc else: u = funcs.pop(0) self.addMenuItem(menuName, t, u) def __checkCopyAndPaste(self, event, widget=None): if self.copyAndPaste.inUse: if event is None or not ( event.type == "10" and self.GET_PLATFORM() == self.LINUX): self.disableMenu("EDIT", 10) if event is not None: widget = event.widget # 9 = ENTER/10 = LEAVE/4=RCLICK/3=PRESS/2=PASTE if event is None or event.type in ["9", "3", "4", "2"]: self.copyAndPaste.setUp(widget) if self.copyAndPaste.canCopy: self.enableMenuItem("EDIT", "Copy") if self.copyAndPaste.canCut: self.enableMenuItem("EDIT", "Cut") if self.copyAndPaste.canPaste: self.enableMenuItem("EDIT", "Paste") self.enableMenuItem("EDIT", "Clear Clipboard") if self.copyAndPaste.canSelect: self.enableMenuItem("EDIT", "Select All") self.enableMenuItem("EDIT", "Clear All") if self.copyAndPaste.canUndo: self.enableMenuItem("EDIT", "Undo") if self.copyAndPaste.canRedo: self.enableMenuItem("EDIT", "Redo") return True else: return False def __copyAndPasteHelper(self, menu): widget = self.topLevel.focus_get() self.copyAndPaste.setUp(widget) if menu == "Cut": self.copyAndPaste.cut() elif menu == "Copy": self.copyAndPaste.copy() elif menu == "Paste": self.copyAndPaste.paste() elif menu == "Select All": self.copyAndPaste.selectAll() elif menu == "Clear Clipboard": self.copyAndPaste.clearClipboard() elif menu == "Clear All": self.copyAndPaste.clearText() elif menu == "Undo": self.copyAndPaste.undo() elif menu == "Redo": self.copyAndPaste.redo() # add a single entry for a menu def addSubMenu(self, menu, subMenu): self.addMenuItem(menu, subMenu, None, "sub") def addMenu(self, name, func, shortcut=None, underline=-1): self.addMenuItem(None, name, func, "topLevel", shortcut, underline) def addMenuSeparator(self, menu): self.addMenuItem(menu, "-") def addMenuCheckBox( self, menu, name, func=None, shortcut=None, underline=-1): self.addMenuItem(menu, name, func, "cb", shortcut, underline) def addMenuRadioButton( self, menu, name, value, func=None, shortcut=None, underline=-1): self.addMenuItem(menu, name, func, "rb", shortcut, underline, value) ################# # wrappers for setters def __setMenu(self, menu, title, value, kind): title = menu + kind + title var = self.__verifyItem(self.n_menuVars, title) if kind == "rb": var.set(value) elif kind == "cb": if value is True: var.set("1") elif value is False: var.set("0") else: if var.get() == "1": var.set("0") else: var.set("1") def setMenuCheckBox(self, menu, name, value=None): self.__setMenu(menu, name, value, "cb") def setMenuRadioButton(self, menu, name, value): self.__setMenu(menu, name, value, "rb") # set align = "none" to remove text def setMenuImage(self, menu, title, image, align="left"): theMenu = self.__verifyItem(self.n_menus, menu) imageObj = self.__getImage(image) if 16 != imageObj.width() or imageObj.width() != imageObj.height(): self.warn("Invalid image resolution for menu item " + title + " (" + image + ") - should be 16x16") #imageObj = imageObj.subsample(2,2) theMenu.entryconfigure(title, image=imageObj, compound=align) def setMenuIcon(self, menu, title, icon, align="left"): image = os.path.join(self.icon_path, icon.lower() + ".png") myWarn = self.__pauseWarn() 
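        # warnings are paused around setMenuImage() so that icon loading
        # (and the 16x16 resolution check it performs) stays quiet here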
self.setMenuImage(menu, title, image, align) self.__resumeWarn(myWarn) def disableMenubar(self): for theMenu in self.n_menus: self.disableMenu(theMenu) # loop through top level menus # and diable any that got missed numMenus = self.menuBar.index("end") if numMenus is not None: for item in range(numMenus+1): self.menuBar.entryconfig(item, state=DISABLED) def enableMenubar(self): for theMenu in self.n_menus: self.enableMenu(theMenu) # loop through toplevel menus # and enable anythat got missed numMenus = self.menuBar.index("end") if numMenus is not None: for item in range(numMenus+1): self.menuBar.entryconfig(item, state=NORMAL) def disableMenu( self, title, limit=None): self.__changeMenuState( title, DISABLED, limit) def enableMenu( self, title, limit=None): self.__changeMenuState( title, NORMAL, limit) def __changeMenuState(self, title, state, limit=None): theMenu = self.__verifyItem(self.n_menus, title) numMenus = theMenu.index("end") if numMenus is not None: # MAC_APP (and others?) returns None for item in range(numMenus + 1): if limit is not None and limit == item: break try: theMenu.entryconfigure(item, state=state) except: pass # separator # also diable the toplevel menu that matches this one try: self.menuBar.entryconfig(self.menuBar.index(title), state=state) except TclError: # ignore if we fail... pass def disableMenuItem(self, title, item): theMenu = self.__verifyItem(self.n_menus, title) theMenu.entryconfigure(item, state=DISABLED) def enableMenuItem(self, title, item): theMenu = self.__verifyItem(self.n_menus, title) theMenu.entryconfigure(item, state=NORMAL) ################# # wrappers for getters def __getMenu(self, menu, title, kind): title = menu + kind + title var = self.__verifyItem(self.n_menuVars, title) if kind == "rb": return var.get() elif kind == "cb": if var.get() == "1": return True else: return False def getMenuCheckBox(self, menu, title): return self.__getMenu(menu, title, "cb") def getMenuRadioButton(self, menu, title): return self.__getMenu(menu, title, "rb") ################# # wrappers for platform specific menus # enables the preferences item in the app menu def addMenuPreferences(self, func): if self.platform == self.MAC: self.__initMenu() u = self.MAKE_FUNC(func, "preferences") self.topLevel.createcommand('tk::mac::ShowPreferences', u) else: self.warn("The Preferences Menu is specific to Mac OSX") # MAC help mnenu def addMenuHelp(self, func): if self.platform == self.MAC: self.__initMenu() helpMenu = Menu(self.menuBar, name='help') self.menuBar.add_cascade(menu=helpMenu, label='Help') u = self.MAKE_FUNC(func, "help") self.topLevel.createcommand('tk::mac::ShowHelp', u) self.n_menus["MAC_HELP"] = helpMenu else: self.warn("The Help Menu is specific to Mac OSX") # Shows a Window menu def addMenuWindow(self): if self.platform == self.MAC: self.__initMenu() windowMenu = Menu(self.menuBar, name='window') self.menuBar.add_cascade(menu=windowMenu, label='Window') self.n_menus["MAC_WIN"] = windowMenu else: self.warn("The Window Menu is specific to Mac OSX") # adds an edit menu - by default only as a pop-up # if inMenuBar is True - then show in menu too def addMenuEdit(self, inMenuBar=False): self.__initMenu() editMenu = Menu(self.menuBar, tearoff=False) if inMenuBar: self.menuBar.add_cascade(menu=editMenu, label='Edit ') self.n_menus["EDIT"] = editMenu self.copyAndPaste.inUse = True if gui.GET_PLATFORM() == gui.LINUX: self.addMenuSeparator("EDIT") if gui.GET_PLATFORM() == gui.MAC: shortcut = "Cmd+" else: shortcut = "Control-" eList = [ ('Cut', lambda e: 
self.__copyAndPasteHelper("Cut"), "X", False), ('Copy', lambda e: self.__copyAndPasteHelper("Copy"), "C", False), ('Paste', lambda e: self.__copyAndPasteHelper("Paste"), "V", False), ('Select All', lambda e: self.__copyAndPasteHelper("Select All"), "A", True if gui.GET_PLATFORM() == gui.MAC else False), ('Clear Clipboard', lambda e: self.__copyAndPasteHelper("Clear Clipboard"), "B", True)] for (txt, cmd, sc, bind) in eList: acc = shortcut + sc self.addMenuItem( "EDIT", txt, cmd, shortcut=acc, createBinding=bind) # add a clear option self.addMenuSeparator("EDIT") self.addMenuItem( "EDIT", "Clear All", lambda e: self.__copyAndPasteHelper("Clear All")) self.addMenuSeparator("EDIT") self.addMenuItem( "EDIT", 'Undo', lambda e: self.__copyAndPasteHelper("Undo"), shortcut=shortcut + "Z", createBinding=False) self.addMenuItem("EDIT", 'Redo', lambda e: self.__copyAndPasteHelper( "Redo"), shortcut="Shift-" + shortcut + "Z", createBinding=True) self.disableMenu("EDIT") def appJarAbout(self, menu=None): self.infoBox("About appJar", "appJar\nCopyright Richard Jarvis, 2016") def appJarHelp(self, menu=None): self.infoBox("appJar Help", "For help, visit http://appJar.info") def addAppJarMenu(self): if self.platform == self.MAC: self.addMenuItem("MAC_APP", "About appJar", self.appJarAbout) self.addMenuWindow() self.addMenuHelp(self.appJarHelp) elif self.platform == self.WINDOWS: self.addMenuSeparator('WIN_SYS') self.addMenuItem("WIN_SYS", "About appJar", self.appJarAbout) self.addMenuItem("WIN_SYS", "appJar Help", self.appJarHelp) ##################################### # FUNCTIONS for status bar ##################################### def addStatus(self, header="", fields=1, side=None): self.warn("addStatus() is deprecated, please use addStatusbar()") self.addStatusbar(header, fields, side) def addStatusbar(self, header="", fields=1, side=None): self.hasStatus = True self.header = header self.statusFrame = Frame(self.appWindow) self.statusFrame.config(bd=1, relief=SUNKEN) self.statusFrame.pack(side=BOTTOM, fill=X, anchor=S) self.status = [] for i in range(fields): self.status.append(Label(self.statusFrame)) self.status[i].config( bd=1, relief=SUNKEN, anchor=W, font=self.statusFont, width=10) self.__addTooltip(self.status[i], "Status bar", True) if side == "LEFT": self.status[i].pack(side=LEFT) elif side == "RIGHT": self.status[i].pack(side=RIGHT) else: self.status[i].pack(side=LEFT, expand=1, fill=BOTH) def setStatus(self, text, field=0): self.warn("setStatus() is deprecated, please use setStatusbar()") self.setStatusbar(text, field) def setStatusbar(self, text, field=0): if self.hasStatus: if field is None: for status in self.status: status.config(text=self.__getFormatStatus(text)) elif field >= 0 and field < len(self.status): self.status[field].config(text=self.__getFormatStatus(text)) else: raise Exception("Invalid status field: " + str(field) + ". Must be between 0 and " + str(len(self.status) - 1)) def setStatusBg(self, colour, field=None): self.warn("setStatusBg() is deprecated, please use setStatusbarBg()") self.setStatusbarBg(colour, field) def setStatusbarBg(self, colour, field=None): if self.hasStatus: if field is None: for status in self.status: status.config(background=colour) elif field >= 0 and field < len(self.status): self.status[field].config(background=colour) else: raise Exception("Invalid status field: " + str(field) + ". 
Must be between 0 and " + str(len(self.status) - 1)) def setStatusbarFg(self, colour, field=None): if self.hasStatus: if field is None: for status in self.status: status.config(foreground=colour) elif field >= 0 and field < len(self.status): self.status[field].config(foreground=colour) else: raise Exception("Invalid status field: " + str(field) + ". Must be between 0 and " + str(len(self.status) - 1)) def setStatusbarWidth(self, width, field=None): if self.hasStatus: if field is None: for status in self.status: status.config(width=width) elif field >= 0 and field < len(self.status): self.status[field].config(width=width) else: raise Exception("Invalid status field: " + str(field) + ". Must be between 0 and " + str(len(self.status) - 1)) def clearStatusbar(self, field=None): if self.hasStatus: if field is None: for status in self.status: status.config(text=self.__getFormatStatus("")) elif field >= 0 and field < len(self.status): self.status[field].config(text=self.__getFormatStatus("")) else: raise Exception("Invalid status field: " + str(field) + ". Must be between 0 and " + str(len(self.status) - 1)) # formats the string shown in the status bar def __getFormatStatus(self, text): text = str(text) if len(text) == 0: return "" elif len(self.header) == 0: return text else: return self.header + ": " + text ##################################### # TOOLTIPS ##################################### def __addTooltip(self, item, text, hideWarn=False): if TOOLTIP_AVAILABLE: # turn off warnings about tooltips if hideWarn: myWarn = self.__pauseWarn() var = StringVar(self.topLevel) var.set(text) tip = ToolTip(item, delay=500, follow_mouse=1, textvariable=var) item.tooltip = tip if hideWarn: self.__resumeWarn(myWarn) return var elif not hideWarn: self.warn( "ToolTips unavailable - check tooltip.py is in the lib folder") ##################################### # FUNCTIONS to show pop-up dialogs ##################################### # function to access the last made pop_up def getPopUp(self): return self.topLevel.POP_UP def infoBox(self, title, message): self.topLevel.update_idletasks() MessageBox.showinfo(title, message) self.__bringToFront() def errorBox(self, title, message): self.topLevel.update_idletasks() MessageBox.showerror(title, message) self.__bringToFront() def warningBox(self, title, message): self.topLevel.update_idletasks() MessageBox.showwarning(title, message) self.__bringToFront() def yesNoBox(self, title, message): self.topLevel.update_idletasks() return MessageBox.askyesno(title, message) def questionBox(self, title, message): self.topLevel.update_idletasks() return MessageBox.askquestion(title, message) def okBox(self, title, message): self.topLevel.update_idletasks() return MessageBox.askokcancel(title, message) def retryBox(self, title, message): self.topLevel.update_idletasks() return MessageBox.askretrycancel(title, message) def openBox( self, title=None, dirName=None, fileTypes=None, asFile=False): self.topLevel.update_idletasks() # define options for opening options = {} if title is not None: options['title'] = title if dirName is not None: options['initialdir'] = dirName if fileTypes is not None: options['filetypes'] = fileTypes if asFile: return filedialog.askopenfile(mode="r", **options) # will return "" if cancelled else: return filedialog.askopenfilename(**options) def saveBox( self, title=None, fileName=None, dirName=None, fileExt=".txt", fileTypes=None, asFile=False): self.topLevel.update_idletasks() if fileTypes is None: fileTypes = [('all files', '.*'), ('text files', 
                         '.txt')]

        # define options for opening
        options = {}
        options['defaultextension'] = fileExt
        options['filetypes'] = fileTypes
        options['initialdir'] = dirName
        options['initialfile'] = fileName
        options['title'] = title

        if asFile:
            return filedialog.asksaveasfile(mode='w', **options)
        # will return "" if cancelled
        else:
            return filedialog.asksaveasfilename(**options)

    def directoryBox(self, title=None, dirName=None):
        self.topLevel.update_idletasks()
        options = {}
        options['initialdir'] = dirName
        options['title'] = title
        options['mustexist'] = False
        fileName = filedialog.askdirectory(**options)

        if fileName == "":
            return None
        else:
            return fileName

    def colourBox(self, colour='#ff0000'):
        self.topLevel.update_idletasks()
        col = askcolor(colour)
        if col[1] is None:
            return None
        else:
            return col[1]

    def textBox(self, title, question):
        self.topLevel.update_idletasks()
        return TextDialog(self.topLevel, title, question).result

    def numberBox(self, title, question):
        return self.numBox(title, question)

    def numBox(self, title, question):
        self.topLevel.update_idletasks()
        return NumDialog(self.topLevel, title, question).result


#####################################
# ProgressBar Class
# from: http://tkinter.unpythonic.net/wiki/ProgressMeter
# A gradient fill will be applied to the Meter
#####################################
class Meter(Frame):

    def __init__(
            self,
            master,
            width=100,
            height=20,
            bg='white',
            fillColour='orchid1',
            value=0.0,
            text=None,
            font=None,
            fg='black',
            *args,
            **kw):
        # call the super constructor
        Frame.__init__(self, master, bg=bg, width=width, height=height,
                       relief='ridge', bd=3, *args, **kw)

        # remember the starting value
        self._value = value
        self._colour = fillColour
        self._midFill = fg

        # create the canvas
        self._canv = Canvas(self, bg=self['bg'], width=self['width'],
                            height=self['height'], highlightthickness=0,
                            relief='flat', bd=0)
        self._canv.pack(fill='both', expand=1)

        # create the text
        width, height = self.getWH(self._canv)
        self._text = self._canv.create_text(
            width / 2, height / 2, text='', fill=fg)

        if font:
            self._canv.itemconfigure(self._text, font=font)

        self.set(value, text)
        self.moveText()

        # bind refresh event
        self.bind('<Configure>', self._update_coords)

    # customised config setters
    def config(self, cnf=None, **kw):
        self.configure(cnf, **kw)

    def configure(self, cnf=None, **kw):
        # properties to apply to the Meter
        kw = gui.CLEAN_CONFIG_DICTIONARY(**kw)
        if "fill" in kw:
            self._colour = kw.pop("fill")
        if "fg" in kw:
            col = kw.pop("fg")
            self._canv.itemconfigure(self._text, fill=col)
            self._midFill = col
        if "bg" in kw:
            self._canv.config(bg=kw.pop("bg"))
        if "width" in kw:
            self._canv.config(width=kw.pop("width"))
        if "height" in kw:
            self._canv.config(height=kw.pop("height"))
        if "font" in kw:
            self._canv.itemconfigure(self._text, font=kw.pop("font"))

        # propagate anything left
        if PYTHON2:
            Frame.config(self, cnf, **kw)
        else:
            super(Frame, self).config(cnf, **kw)
        self.makeBar()

    # called when resized
    def _update_coords(self, event):
        '''Updates the position of the text and rectangle inside the canvas
           when the size of the widget gets changed.'''
        self.makeBar()
        self.moveText()

    # getter
    def get(self):
        val = self._value
        try:
            txt = self._canv.itemcget(self._text, 'text')
        except:
            txt = None
        return val, txt

    # update the variables, then call makeBar
    def set(self, value=0.0, text=None):
        # make the value failsafe:
        value = value / 100.0
        if value < 0.0:
            value = 0.0
        elif value > 1.0:
            value = 1.0
        self._value = value

        # if no text is specified use the default percentage string:
        if text is None:
            text = str(int(round(100 *
value))) + ' %' # set the new text self._canv.itemconfigure(self._text, text=text) self.makeBar() # draw the bar def makeBar(self): width, height = self.getWH(self._canv) start = 0 fin = width * self._value self.drawLines(width, height, start, fin, self._value, self._colour) self._canv.update_idletasks() # move the text def moveText(self): width, height = self.getWH(self._canv) if hasattr(self, "_text"): self._canv.coords( self._text, width/2, height/2) # draw gradated lines, in given coordinates # using the specified colour def drawLines(self, width, height, start, fin, val, col, tags="gradient"): '''Draw a gradient''' # http://stackoverflow.com/questions/26178869/is-it-possible-to-apply-gradient-colours-to-bg-of-tkinter-python-widgets # remove the lines & midline self._canv.delete(tags) self._canv.delete("midline") # determine start & end colour (r1, g1, b1) = self.tint(col, -30000) (r2, g2, b2) = self.tint(col, 30000) # determine a direction & range if val < 0: direction = -1 limit = int(start - fin) else: direction = 1 limit = int(fin - start) # if lines to draw if limit != 0: # work out the ratios r_ratio = float(r2 - r1) / limit g_ratio = float(g2 - g1) / limit b_ratio = float(b2 - b1) / limit # loop through the range of lines, in the right direction modder = 0 for i in range(int(start), int(fin), direction): nr = int(r1 + (r_ratio * modder)) ng = int(g1 + (g_ratio * modder)) nb = int(b1 + (b_ratio * modder)) colour = "#%4.4x%4.4x%4.4x" % (nr, ng, nb) self._canv.create_line( i, 0, i, height, tags=(tags,), fill=colour) modder += 1 self._canv.lower(tags) # draw a midline self._canv.create_line(start, 0, start, height, fill=self._midFill, tags=("midline",)) self._canv.update_idletasks() # function to calculate a tint def tint(self, col, brightness_offset=1): ''' dim or brighten the specified colour by the specified offset ''' # http://chase-seibert.github.io/blog/2011/07/29/python-calculate-lighterdarker-rgb-colors.html rgb_hex = self._canv.winfo_rgb(col) new_rgb_int = [hex_value + brightness_offset for hex_value in rgb_hex] # make sure new values are between 0 and 65535 new_rgb_int = [min([65535, max([0, i])]) for i in new_rgb_int] return new_rgb_int def getWH(self, widg): # ISSUES HERE: # on MAC & LINUX, w_width/w_height always 1 # on WIN, w_height is bigger then r_height - leaving empty space self._canv.update_idletasks() r_width = widg.winfo_reqwidth() r_height = widg.winfo_reqheight() w_width = widg.winfo_width() w_height = widg.winfo_height() max_height = max(r_height, w_height) max_width = max(r_width, w_width) return (max_width, max_height) ##################################### # SplitMeter Class extends the Meter above # Will fill in the empty space with a second fill colour # Two colours should be provided - left & right fill ##################################### class SplitMeter(Meter): def __init__(self, master, width=100, height=20, bg='white', leftfillColour='red', rightfillColour='blue', value=0.0, text=None, font=None, fg='black', *args, **kw): self._leftFill = leftfillColour self._rightFill = rightfillColour Meter.__init__(self, master, width=width, height=height, bg=bg, value=value, text=text, font=font, fg=fg, *args, **kw) # override the handling of fill # list of two colours def configure(self, cnf=None, **kw): kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) if "fill" in kw: cols = kw.pop("fill") if not isinstance(cols, list): raise Exception("SplitMeter requires a list of two colours") else: self._leftFill = cols[0] self._rightFill = cols[1] # propagate any left over confs 
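        # on Python 2 the Tkinter widget classes are old-style classes, so
        # super() cannot be used - the parent method is called explicitly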
if PYTHON2: Meter.configure(self, cnf, **kw) else: super(SplitMeter, self).configure(cnf, **kw) def set(self, value=0.0, text=None): # make the value failsafe: value = value / 100.0 if value < 0.0: value = 0.0 elif value > 1.0: value = 1.0 self._value = value self.makeBar() # override the makeBar function def makeBar(self): width, height = self.getWH(self._canv) mid = width * self._value self.drawLines(width, height, 0, mid, self._value, self._leftFill, tags="left") self.drawLines(width, height, mid, width, self._value, self._rightFill, tags="right") ##################################### # SplitMeter Class extends the Meter above # Used to allow bi-directional metering, starting from a mid point # Two colours should be provided - left & right fill # A gradient fill will be applied to the Meter ##################################### class DualMeter(SplitMeter): def __init__(self, master, width=100, height=20, bg='white', leftfillColour='pink', rightfillColour='green', value=None, text=None, font=None, fg='black', *args, **kw): SplitMeter.__init__(self, master, width=width, height=height, bg=bg, leftfillColour=leftfillColour, rightfillColour=rightfillColour, value=value, text=text, font=font, fg=fg, *args, **kw) def set(self, value=[0,0], text=None): if value is None: value=[0,0] if not isinstance(value, list): raise Exception("DualMeter.set() requires a list of two arguments") # make copy, and reduce to decimal vals = [value[0]/100, value[1]/100] # normalise if vals[0] < -1: vals[0] = -1.0 elif vals[0] > 0: vals[0] = vals[0] * -1 if vals[1] > 1.0: vals[1] = 1.0 elif vals[1] < 0: vals[1] = 0 elif vals[1] < -1: vals[1] = -1.0 self._value = vals # if no text is specified use the default percentage string: if text is not None: # set the new text self._canv.itemconfigure(self._text, text=text) self.makeBar() def makeBar(self): # get range to draw lines width, height = self.getWH(self._canv) start = width / 2 l_fin = start + (start * self._value[0]) r_fin = start + (start * self._value[1]) self.drawLines(width, height, start, l_fin, self._value[0], self._leftFill, tags="left") self.drawLines(width, height, start, r_fin, self._value[1], self._rightFill, tags="right") ################################# # TabbedFrame Class ################################# class TabbedFrame(Frame): def __init__( self, master, bg, fill=False, changeOnFocus=True, *args, **kwargs): # main frame & tabContainer inherit BG colour Frame.__init__(self, master, bg=bg) # create two containers self.tabContainer = Frame(self, bg=bg) self.paneContainer = Frame(self, relief=SUNKEN, bd=2, bg=bg, **kwargs) # grid the containers Grid.columnconfigure(self, 0, weight=1) Grid.rowconfigure(self, 1, weight=1) self.fill = fill if self.fill: self.tabContainer.grid(row=0, sticky=W + E) else: self.tabContainer.grid(row=0, sticky=W) self.paneContainer.grid(row=1, sticky="NESW") # nain store dictionary: name = [tab, pane] from collections import OrderedDict self.widgetStore = OrderedDict() self.selectedTab = None self.highlightedTab = None self.changeOnFocus = changeOnFocus self.changeEvent = None # selected tab & all panes self.activeFg = "blue" self.activeBg = "white" # other tabs self.inactiveFg = "black" self.inactiveBg = "grey" # disabled tabs self.disabledFg = "lightGray" self.disabledBg = "darkGray" def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) # configure fgs if "activeforeground" in kw: self.activeFg = kw.pop("activeforeground") for key in 
list(self.widgetStore.keys()): self.widgetStore[key][0].config(highlightcolor=self.activeFg) if "activebackground" in kw: self.activeBg = kw.pop("activebackground") for key in list(self.widgetStore.keys()): self.widgetStore[key][1].configure(bg=self.activeBg) for child in self.widgetStore[key][1].winfo_children(): gui.SET_WIDGET_BG(child, self.activeBg) if "fg" in kw: self.inactiveFg = kw.pop("fg") if "inactivebackground" in kw: self.inactiveBg = kw.pop("inactivebackground") if "disabledforeground" in kw: self.disabledFg = kw.pop("disabledforeground") if "disabledbackground" in kw: self.disabledBg = kw.pop("disabledbackground") if "bg" in kw: self.tabContainer.configure(bg=kw["bg"]) self.paneContainer.configure(bg=kw["bg"]) if "command" in kw: self.changeEvent = kw.pop("command") # update tabs if we have any if self.selectedTab is not None: self.__colourTabs(False) # propagate any left over confs if PYTHON2: Frame.config(self, cnf, **kw) else: super(Frame, self).config(cnf, **kw) def addTab(self, text, **kwargs): # check for duplicates if text in self.widgetStore: raise Exception("Duplicate tabName: " + text) # create the tab, bind events, pack it in tab = Label( self.tabContainer, text=text, highlightthickness=1, highlightcolor=self.activeFg, relief=RIDGE, cursor="hand2", takefocus=1, **kwargs) tab.disabled = False tab.bind("<Button-1>", lambda *args: self.changeTab(text)) tab.bind("<Return>", lambda *args: self.changeTab(text)) tab.bind("<space>", lambda *args: self.changeTab(text)) tab.bind("<FocusIn>", lambda *args: self.__focusIn(text)) tab.bind("<FocusOut>", lambda *args: self.__focusOut(text)) if self.fill: tab.pack(side=LEFT, ipady=4, ipadx=4, expand=True, fill=BOTH) else: tab.pack(side=LEFT, ipady=4, ipadx=4) # create the pane pane = Frame(self.paneContainer, bg=self.activeBg) pane.grid(sticky="nsew", row=0, column=0) self.paneContainer.grid_columnconfigure(0, weight=1) self.paneContainer.grid_rowconfigure(0, weight=1) # log the first tab as the selected tab if self.selectedTab is None: self.selectedTab = text tab.focus_set() if self.highlightedTab is None: self.highlightedTab = text self.widgetStore[text] = [tab, pane] self.__colourTabs(self.selectedTab) return pane def getTab(self, title): if title not in self.widgetStore.keys(): raise Exception("Invalid tab name: " + title) else: return self.widgetStore[title][1] def expandTabs(self, fill=True): self.fill = fill # update the tabConatiner self.tabContainer.grid_forget() if self.fill: self.tabContainer.grid(row=0, sticky=W + E) else: self.tabContainer.grid(row=0, sticky=W) for key in list(self.widgetStore.keys()): tab = self.widgetStore[key][0] tab.pack_forget() if self.fill: tab.pack(side=LEFT, ipady=4, ipadx=4, expand=True, fill=BOTH) else: tab.pack(side=LEFT, ipady=4, ipadx=4) def __focusIn(self, tabName): if self.changeOnFocus: self.changeTab(tabName) else: self.highlightedTab = tabName self.__colourTabs(False) def __focusOut(self, tabName): self.highlightedTab = None self.__colourTabs(False) def disableAllTabs(self, disabled=True): for tab in self.widgetStore.keys(): self.disableTab(tab, disabled) def disableTab(self, tabName, disabled=True): if tabName not in self.widgetStore.keys(): raise Exception("Invalid tab name: " + tabName) if not disabled: self.widgetStore[tabName][0].disabled = False self.widgetStore[tabName][0].config(cursor="hand2", takefocus=1) else: self.widgetStore[tabName][0].disabled = True self.widgetStore[tabName][0].config(cursor="X_cursor", takefocus=0) if self.highlightedTab == tabName: 
self.highlightedTab = None # difficult if the active tab is disabled if self.selectedTab == tabName: self.widgetStore[tabName][1].grid_remove() # find an enabled tab self.selectedTab = None for key in list(self.widgetStore.keys()): if not self.widgetStore[key][0].disabled: self.changeTab(key) break self.__colourTabs() def changeTab(self, tabName): # quit changing the tab, if it's already selected if self.focus_get() == self.widgetStore[tabName][0]: return if tabName not in self.widgetStore.keys(): raise Exception("Invalid tab name: " + tabName) if self.widgetStore[tabName][0].disabled: return self.selectedTab = tabName self.highlightedTab = tabName self.widgetStore[tabName][0].focus_set() # this will also regrid the appropriate panes self.__colourTabs() if self.changeEvent is not None: self.changeEvent() def getSelectedTab(self): return self.selectedTab def __colourTabs(self, swap=True): # clear all tabs & remove if necessary for key in list(self.widgetStore.keys()): if self.widgetStore[key][0].disabled: self.widgetStore[key][0]['bg'] = self.disabledBg self.widgetStore[key][0]['fg'] = self.disabledFg else: self.widgetStore[key][0]['bg'] = self.inactiveBg self.widgetStore[key][0]['fg'] = self.inactiveFg if swap: self.widgetStore[key][1].grid_remove() # decorate the highlighted tab if self.highlightedTab is not None: self.widgetStore[self.highlightedTab][0]['fg'] = self.activeFg # now decorate the active tab if self.selectedTab is not None: self.widgetStore[self.selectedTab][0]['bg'] = self.activeBg self.widgetStore[self.selectedTab][0]['fg'] = self.activeFg # and grid it if necessary if swap: self.widgetStore[self.selectedTab][1].grid() ##################################### # Drag Grip Label Class ##################################### class Grip(Label): def __init__(self, *args, **kwargs): Label.__init__(self, bitmap="gray25", *args, **kwargs) self.config(cursor="fleur") self.bind("<ButtonPress-1>", self.StartMove) self.bind("<ButtonRelease-1>", self.StopMove) self.bind("<B1-Motion>", self.OnMotion) def StartMove(self, event): self.x = event.x self.y = event.y def StopMove(self, event): self.x = None self.y = None def OnMotion(self, event): parent = self.winfo_toplevel() deltax = event.x - self.x deltay = event.y - self.y x = parent.winfo_x() + deltax y = parent.winfo_y() + deltay parent.geometry("+%s+%s" % (x, y)) ##################################### # Hyperlink Class ##################################### class Link(Label): def __init__(self, *args, **kwargs): Label.__init__(self, *args, **kwargs) self.config(fg="blue", takefocus=1, highlightthickness=0) self.page = "" self.DEFAULT_TEXT = "" if gui.GET_PLATFORM() == gui.MAC: self.config(cursor="pointinghand") elif gui.GET_PLATFORM() in [gui.WINDOWS, gui.LINUX]: self.config(cursor="hand2") def registerCallback(self, callback): self.bind("<Button-1>", callback) self.bind("<Return>", callback) self.bind("<space>", callback) def launchBrowser(self, event): webbrowser.open_new(r"" + self.page) # webbrowser.open_new_tab(self.page) def registerWebpage(self, page): if not page.startswith("http"): raise InvalidURLError( "Invalid URL: " + page + " (it should begin as http://)") self.page = page self.bind("<Button-1>", self.launchBrowser) self.bind("<Return>", self.launchBrowser) self.bind("<space>", self.launchBrowser) def config(self, **kw): self.configure(**kw) def configure(self, **kw): if "text" in kw: self.DEFAULT_TEXT = kw["text"] if PYTHON2: Label.config(self, **kw) else: super(Label, self).config(**kw) ##################################### 
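# Illustrative usage sketch for the Properties widget defined below, kept as a
# comment. It assumes a Tk root window and that this module's names are in
# scope; each key in the dict becomes a Checkbutton and getProperties()
# returns the current boolean values:
#
#     root = Tk()
#     props = Properties(root, "Settings", {"sound": True, "fullscreen": False})
#     props.pack(fill="both", expand=True)
#     print(props.getProperties())     # {'sound': True, 'fullscreen': False}
#     root.mainloop()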
# Properties Widget ##################################### class Properties(LabelFrame): def __init__( self, parent, text, props=None, haveLabel=True, *args, **options): if haveLabel: LabelFrame.__init__(self, parent, text=text, *args, **options) else: LabelFrame.__init__(self, parent, text="", *args, **options) self.parent = parent self.config(relief="groove") self.props = {} self.cbs = {} self.title = text self.addProperties(props) def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): # properties to propagate to CheckBoxes vals = ["bg", "fg", "disabledforeground", "state", "font", "command"] kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) # loop through all kw properties received for k, v in kw.items(): if k in vals: # and set them on all CheckBoxes if desired for prop_key in self.cbs: self.cbs[prop_key][k] = v # remove any props the LabelFrame can't handle kw.pop("state", None) kw.pop("disabledforeground", None) kw.pop("command", None) if PYTHON2: LabelFrame.config(self, cnf, **kw) else: super(LabelFrame, self).config(cnf, **kw) def addProperties(self, props): if props is not None: for k in sorted(props): self.addProperty(k, props[k]) def addProperty(self, prop, value=False): if prop in self.props: if value is None: del self.props[prop] self.cbs[prop].pack_forget() del self.cbs[prop] else: self.props[prop].set(value) elif prop is not None: var = BooleanVar() var.set(value) cb = Checkbutton(self) cb.config( anchor=W, text=prop, variable=var, bg=self.cget("bg"), font=self.cget("font"), fg=self.cget("fg")) cb.pack(fill="x", expand=1) self.props[prop] = var self.cbs[prop] = cb else: raise Exception("Can't add a None property to: ", prop) # if text is not None: lab.config ( text=text ) def getProperties(self): vals = {} for k, v in self.props.items(): vals[k] = bool(v.get()) return vals def getProperty(self, prop): if prop in self.props: return bool(self.props[prop].get()) else: raise Exception( "Property: " + str(prop) + " not found in Properties: " + self.title) ##################################### # appJar Frame ##################################### class ajFrame(Frame): def __init__(self, parent, *args, **options): Frame.__init__(self, parent, *args, **options) ##################################### # Simple Separator ##################################### class Separator(Frame): def __init__(self, parent, orient="horizontal", *args, **options): Frame.__init__(self, parent, *args, **options) self.line = Frame(self) if orient == "horizontal": self.line.config( relief="ridge", height=2, width=100, borderwidth=1) self.line.pack(padx=5, pady=5, fill="x", expand=1) else: self.line.config( relief="ridge", height=100, width=2, borderwidth=1) self.line.pack(padx=5, pady=5, fill="y", expand=1) def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): if "fg" in kw: self.line.config(bg=kw.pop("fg")) if PYTHON2: Frame.config(self, cnf, **kw) else: super(Frame, self).config(cnf, **kw) ##################################### # Pie Chart Class ##################################### class PieChart(Canvas): # constant for available colours COLOURS = [ "#023fa5", "#7d87b9", "#bec1d4", "#d6bcc0", "#bb7784", "#8e063b", "#4a6fe3", "#8595e1", "#b5bbe3", "#e6afb9", "#e07b91", "#d33f6a", "#11c638", "#8dd593", "#c6dec7", "#ead3c6", "#f0b98d", "#ef9708", "#0fcfc0", "#9cded6", "#d5eae7", "#f3e1eb", "#f6c4e1", "#f79cd4"] def __init__(self, container, fracs, bg="green"): Canvas.__init__(self, container, bd=0, highlightthickness=0, bg=bg) 
self.fracs = fracs self.arcs = [] self.__drawPie() self.bind("<Configure>", self.__drawPie) def __drawPie(self, event=None): # remove the existing arcs for arc in self.arcs: self.delete(arc) self.arcs = [] # get the width * height w = self.winfo_width() h = self.winfo_height() # scale h&w - so they don;t hit the edges min_w = w * .05 max_w = w * .95 min_h = h * .05 max_h = h * .95 # if we're not in a square # adjust them to make sure we get a circle if w > h: extra = (w * .9 - h * .9) / 2.0 min_w += extra max_w -= extra elif h > w: extra = (h * .9 - w * .9) / 2.0 min_h += extra max_h -= extra coord = min_w, min_h, max_w, max_h pos = col = 0 for key, val in self.fracs.items(): sliceId = "slice" + str(col) arc = self.create_arc( coord, fill=self.COLOURS[ col % len( self.COLOURS)], start=self.frac(pos), extent=self.frac(val), activedash=( 3, 5), activeoutline="grey", activewidth=3, tag=( sliceId, ), width=1) self.arcs.append(arc) # generate a tooltip if TOOLTIP_AVAILABLE: frac = int(val / sum(self.fracs.values()) * 100) tip = key + ": " + str(val) + " (" + str(frac) + "%)" tt = ToolTip( self, tip, delay=500, follow_mouse=1, specId=sliceId) pos += val col += 1 def frac(self, curr): return 360. * curr / sum(self.fracs.values()) def setValue(self, name, value): if value == 0 and name in self.fracs: del self.fracs[name] else: self.fracs[name] = value self.__drawPie() ##################################### # Tree Widget Class # https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch11s11.html # idlelib -> TreeWidget.py # modify minidom - https://wiki.python.org/moin/MiniDom ##################################### class ajTreeNode(TreeNode): def __init__(self, canvas, parent, item): TreeNode.__init__(self, canvas, parent, item) self.bgColour = None self.fgColour = None self.bgHColour = None self.fgHColour = None # called (if set) when a leaf is edited self.editEvent = None if self.parent: self.bgColour = self.parent.bgColour self.fgColour = self.parent.fgColour self.bgHColour = self.parent.bgHColour self.fgHColour = self.parent.fgHColour self.editEvent = self.parent.editEvent def registerEditEvent(self, func): self.editEvent = func for c in self.children: c.registerEditEvent(func) def setBgColour(self, colour): self.canvas.config(background=colour) self.bgColour = colour self.__doUpdateColour() def setFgColour(self, colour): self.fgColour = colour self.__doUpdateColour() def setBgHColour(self, colour): self.bgHColour = colour self.__doUpdateColour() def setFgHColour(self, colour): self.fgHColour = colour self.__doUpdateColour() def __doUpdateColour(self): self.__updateColours( self.bgColour, self.bgHColour, self.fgColour, self.fgHColour) self.update() def __updateColours(self, bgCol, bgHCol, fgCol, fgHCol): self.bgColour = bgCol self.fgColour = fgCol self.bgHColour = bgHCol self.fgHColour = fgHCol for c in self.children: c.__updateColours(bgCol, bgHCol, fgCol, fgHCol) # override parent function, so that we can change the label's background # colour def drawtext(self): if PYTHON2: TreeNode.drawtext(self) else: super(ajTreeNode, self).drawtext() self.colourLabels() # override parent function, so that we can generate an event on finish # editing def edit_finish(self, event=None): if PYTHON2: TreeNode.edit_finish(self, event) else: super(ajTreeNode, self).edit_finish(event) if self.editEvent is not None: self.editEvent() def colourLabels(self): try: if not self.selected: self.label.config(background=self.bgColour, fg=self.fgColour) else: 
self.label.config(background=self.bgHColour, fg=self.fgHColour) except: pass def getSelectedText(self): item = self.getSelected() if item is not None: return item.GetText() else: return None def getSelected(self): if self.selected: return self.item else: for c in self.children: val = c.getSelected() if val is not None: return val return None # implementation of container for XML data # functions implemented as specified in skeleton class ajTreeData(TreeItem): def __init__(self, node): self.node = node self.dblClickFunc = None self.canEdit = True # REQUIRED FUNCTIONS # called whenever the tree expands def GetText(self): node = self.node if node.nodeType == node.ELEMENT_NODE: return node.nodeName elif node.nodeType == node.TEXT_NODE: return node.nodeValue def IsEditable(self): return self.canEdit and not self.node.hasChildNodes() def SetText(self, text): self.node.replaceWholeText(text) def IsExpandable(self): return self.node.hasChildNodes() def GetIconName(self): if not self.IsExpandable(): return "python" # change to file icon def GetSubList(self): children = self.node.childNodes prelist = [ajTreeData(node) for node in children] itemList = [item for item in prelist if item.GetText().strip()] for item in itemList: item.registerDblClick(self.dblClickFunc) item.canEdit = self.canEdit return itemList def OnDoubleClick(self): if self.IsEditable(): # TO DO: start editing this node... pass if self.dblClickFunc is not None: self.dblClickFunc() #  EXTRA FUNCTIONS # TODO: can only set before calling go() def setCanEdit(self, value=True): self.canEdit = value # TODO: can only set before calling go() def registerDblClick(self, func): self.dblClickFunc = func # not used - for DEBUG def getSelected(self, spaces=1): if spaces == 1: print(self.node.tagName) for c in self.node.childNodes: if c.__class__.__name__ == "Element": print(" " * spaces, ">>", c.tagName) node = ajTreeData(c) node.getSelected(spaces + 2) elif c.__class__.__name__ == "Text": val = c.data.strip() if len(val) > 0: print(" " * spaces, ">>>>", val) ##################################### # errors ##################################### class ItemLookupError(LookupError): '''raise this when there's a lookup error for my app''' pass class InvalidURLError(ValueError): '''raise this when there's a lookup error for my app''' pass ##################################### # ToggleFrame - collapsable frame # http://stackoverflow.com/questions/13141259/expandable-and-contracting-frame-in-tkinter ##################################### class ToggleFrame(Frame): def __init__(self, parent, title="", *args, **options): Frame.__init__(self, parent, *args, **options) self.config(relief="raised", borderwidth=2, padx=5, pady=5) self.showing = True self.titleFrame = Frame(self) self.titleFrame.config(bg="DarkGray") self.titleLabel = Label(self.titleFrame, text=title) self.titleLabel.config(font="-weight bold") self.toggleButton = Button( self.titleFrame, width=2, text='-', command=self.toggle) self.subFrame = Frame(self, relief="sunken", borderwidth=2) self.configure(bg="DarkGray") self.grid_columnconfigure(0, weight=1) self.titleFrame.grid(row=0, column=0, sticky=EW) self.titleFrame.grid_columnconfigure(0, weight=1) self.titleLabel.grid(row=0, column=0) self.toggleButton.grid(row=0, column=1) self.subFrame.grid(row=1, column=0, sticky=EW) def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) if "font" in kw: self.titleLabel.config(font=kw["font"]) 
self.toggleButton.config(font=kw["font"]) del(kw["font"]) if "bg" in kw: self.titleFrame.config(bg=kw["bg"]) self.titleLabel.config(bg=kw["bg"]) self.subFrame.config(bg=kw["bg"]) if gui.GET_PLATFORM() == gui.MAC: self.toggleButton.config(highlightbackground=kw["bg"]) if "state" in kw: if kw["state"] == "disabled": if self.showing: self.toggle() self.toggleButton.config(state=kw["state"]) del(kw["state"]) if PYTHON2: Frame.config(self, cnf, **kw) else: super(Frame, self).config(cnf, **kw) def toggle(self): if not self.showing: self.subFrame.grid() self.toggleButton.configure(text='-') else: self.subFrame.grid_remove() self.toggleButton.configure(text='+') self.showing = not self.showing def getContainer(self): return self.subFrame def stop(self): self.update_idletasks() self.titleFrame.config(width=self.winfo_reqwidth()) self.toggle() def isShowing(self): return self.showing ##################################### # Paged Window ##################################### class PagedWindow(Frame): def __init__(self, parent, title=None, **opts): # call the super constructor Frame.__init__(self, parent, **opts) self.config(width=300, height=400) # globals to hold list of frames(pages) and current page self.currentPage = -1 self.frames = [] self.shouldShowLabel = True self.shouldShowTitle = True self.title = title self.navPos = 1 self.maxX = 0 self.maxY = 0 self.pageChangeEvent = None # create the 3 components, including a default container frame self.titleLabel = Label(self) self.prevButton = Button( self, text="PREVIOUS", command=self.showPrev, state="disabled", width=10) self.nextButton = Button( self, text="NEXT", command=self.showNext, state="disabled", width=10) self.prevButton.bind("<Control-Button-1>", self.showFirst) self.nextButton.bind("<Control-Button-1>", self.showLast) self.posLabel = Label(self, width=8) self.grid_rowconfigure(0, weight=0) self.grid_rowconfigure(1, weight=1) self.grid_rowconfigure(2, weight=0) self.grid_columnconfigure(0, weight=1) self.grid_columnconfigure(1, weight=1) self.grid_columnconfigure(2, weight=1) # grid the navigation components self.prevButton.grid( row=self.navPos + 1, column=0, sticky=N + S + W, padx=5, pady=( 0, 5)) self.posLabel.grid( row=self.navPos + 1, column=1, sticky=N + S + E + W, padx=5, pady=( 0, 5)) self.nextButton.grid( row=self.navPos + 1, column=2, sticky=N + S + E, padx=5, pady=( 0, 5)) # show the title if self.title is not None and self.shouldShowTitle: self.titleLabel.config(text=self.title, font="-weight bold") self.titleLabel.grid( row=0, column=0, columnspan=3, sticky=N + W + E) # show the label self.__setLabel() def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) if "bg" in kw: if gui.GET_PLATFORM() == gui.MAC: self.prevButton.config(highlightbackground=kw["bg"]) self.nextButton.config(highlightbackground=kw["bg"]) self.posLabel.config(bg=kw["bg"]) self.titleLabel.config(bg=kw["bg"]) if "fg" in kw: self.poslabel.config(fg=kw["fg"]) self.titleLabel.config(fg=kw["fg"]) kw.pop("fg") if PYTHON2: Frame.config(self, cnf, **kw) else: super(Frame, self).config(cnf, **kw) # def setBg(self, colour): # self.config(bg=colour) # # def setFg(self, colour): # self.poslabel.config(fg=colour) # self.titleLabel.config(fg=colour) # functions to change the labels of the two buttons def setPrevButton(self, title): self.prevButton.config(text=title) def setNextButton(self, title): self.nextButton.config(text=title) def setNavPositionTop(self, top=True): oldNavPos = 
self.navPos pady = (0, 5) if top: self.navPos = 0 else: self.navPos = 1 if oldNavPos != self.navPos: if self.navPos == 0: self.grid_rowconfigure(1, weight=0) self.grid_rowconfigure(2, weight=1) pady = (5, 0) else: self.grid_rowconfigure(1, weight=1) self.grid_rowconfigure(2, weight=0) # grid the navigation components self.frames[self.currentPage].grid_remove() self.prevButton.grid_remove() self.posLabel.grid_remove() self.nextButton.grid_remove() self.frames[self.currentPage].grid( row=int(not self.navPos) + 1, column=0, columnspan=3, sticky=N + S + E + W, padx=5, pady=5) self.prevButton.grid( row=self.navPos + 1, column=0, sticky=S + W, padx=5, pady=pady) self.posLabel.grid( row=self.navPos + 1, column=1, sticky=S + E + W, padx=5, pady=pady) self.nextButton.grid( row=self.navPos + 1, column=2, sticky=S + E, padx=5, pady=pady) def showLabel(self, val=True): self.shouldShowLabel = val self.__setLabel() def setTitle(self, title): self.title = title self.showTitle() def showTitle(self, val=True): self.shouldShowTitle = val if self.title is not None and self.shouldShowTitle: self.titleLabel.config(text=self.title, font="-weight bold") self.titleLabel.grid( row=0, column=0, columnspan=3, sticky=N + W + E) else: self.titleLabel.grid_remove() # function to update the contents of the label def __setLabel(self): if self.shouldShowLabel: self.posLabel.config( text=str(self.currentPage + 1) + "/" + str(len(self.frames))) else: self.posLabel.config(text="") # get the current frame being shown - for adding widgets def getPage(self): return self.frames[self.currentPage] # get the named frame - for adding widgets def getPage(self, num): return self.frames[num] # get current page number def getPageNumber(self): return self.currentPage + 1 # register a function to call when the page changes def registerPageChangeEvent(self, event): self.pageChangeEvent = event # adds a new page, making it visible def addPage(self): # if we're showing a page, remove it if self.currentPage >= 0: self.__updateMaxSize() self.frames[self.currentPage].grid_forget() # add a new page self.frames.append(Page(self)) self.frames[-1].grid(row=int(not self.navPos) + 1, column=0, columnspan=3, sticky=N + S + E + W, padx=5, pady=5) self.currentPage = len(self.frames) - 1 # update the buttons & labels if self.currentPage > 0: self.prevButton.config(state="normal") self.__setLabel() return self.frames[-1] def stopPage(self): self.__updateMaxSize() self.showPage(1) def __updateMaxSize(self): self.frames[self.currentPage].update_idletasks() x = self.frames[self.currentPage].winfo_reqwidth() y = self.frames[self.currentPage].winfo_reqheight() if x > self.maxX: self.maxX = x if y > self.maxY: self.maxY = y # function to display the specified page # will re-grid, and disable/enable buttons # also updates label def showPage(self, page): if page < 1 or page > len(self.frames): raise Exception("Invalid page number: " + str(page) + ". 
Must be between 1 and " + str(len(self.frames))) self.frames[self.currentPage].grid_forget() self.currentPage = page - 1 self.frames[self.currentPage].grid_propagate(False) self.frames[ self.currentPage].grid( row=int( not self.navPos) + 1, column=0, columnspan=3, sticky=N + S + E + W, padx=5, pady=5) self.frames[self.currentPage].grid_columnconfigure(0, weight=1) self.frames[self.currentPage].config(width=self.maxX, height=self.maxY) self.__setLabel() # update the buttons if len(self.frames) == 1: # only 1 page - no buttons self.prevButton.config(state="disabled") self.nextButton.config(state="disabled") elif self.currentPage == 0: self.prevButton.config(state="disabled") self.nextButton.config(state="normal") elif self.currentPage == len(self.frames) - 1: self.prevButton.config(state="normal") self.nextButton.config(state="disabled") else: self.prevButton.config(state="normal") self.nextButton.config(state="normal") def showFirst(self, event=None): if self.currentPage == 0: self.bell() else: self.showPage(1) if self.pageChangeEvent is not None: self.pageChangeEvent() def showLast(self, event=None): if self.currentPage == len(self.frames) - 1: self.bell() else: self.showPage(len(self.frames)) if self.pageChangeEvent is not None: self.pageChangeEvent() def showPrev(self, event=None): if self.currentPage > 0: self.showPage(self.currentPage) if self.pageChangeEvent is not None: self.pageChangeEvent() else: self.bell() def showNext(self, event=None): if self.currentPage < len(self.frames) - 1: self.showPage(self.currentPage + 2) if self.pageChangeEvent is not None: self.pageChangeEvent() else: self.bell() class Page(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) self.config(relief=RIDGE, borderwidth=2) self.container = parent ######################### # Class to provide auto-completion on Entry boxes # inspired by: https://gist.github.com/uroshekic/11078820 ######################### class AutoCompleteEntry(Entry): def __init__(self, words, *args, **kwargs): Entry.__init__(self, *args, **kwargs) self.allWords = words self.allWords.sort() # store variable - so we can see when it changes self.var = self["textvariable"] = StringVar() self.var.trace('w', self.textChanged) # register events self.bind("<Right>", self.selectWord) self.bind("<Return>", self.selectWord) self.bind("<Up>", self.moveUp) self.bind("<Down>", self.moveDown) self.bind("<FocusOut>", self.closeList, add="+") # no list box - yet self.listBoxShowing = False # function to see if words match def checkMatch(self, fieldValue, acListEntry): pattern = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE) return re.match(pattern, acListEntry) # function to get all matches as a list def getMatches(self): return [w for w in self.allWords if self.checkMatch(self.var.get(), w)] # called when typed in entry def textChanged(self, name, index, mode): # if no text - close list if self.var.get() == '': self.closeList() else: if not self.listBoxShowing: self.makeListBox() self.popListBox() # add words to the list def popListBox(self): if self.listBoxShowing: self.listbox.delete(0, END) shownWords = self.getMatches() if shownWords: for w in shownWords: self.listbox.insert(END, w) self.selectItem(0) # function to create & show an empty list box def makeListBox(self): self.listbox = Listbox(width=self["width"], height=8) self.listbox.bind("<Button-1>", self.mouseClickBox) self.listbox.bind("<Right>", self.selectWord) self.listbox.bind("<Return>", self.selectWord) self.listbox.place( x=self.winfo_x(), y=self.winfo_y() + 
self.winfo_height()) self.listBoxShowing = True # function to handle a mouse click in the list box def mouseClickBox(self, e=None): self.selectItem(self.listbox.nearest(e.y)) self.selectWord(e) # function to close/delete list box def closeList(self, event=None): if self.listBoxShowing: self.listbox.destroy() self.listBoxShowing = False # copy word from list to entry, close list def selectWord(self, event): if self.listBoxShowing: self.var.set(self.listbox.get(ACTIVE)) self.icursor(END) self.closeList() return "break" # wrappers for up/down arrows def moveUp(self, event): return self.arrow("UP") def moveDown(self, event): return self.arrow("DOWN") # function for handling up/down keys def arrow(self, direction): if not self.listBoxShowing: self.makeListBox() self.popListBox() curItem = 0 numItems = self.listbox.size() else: numItems = self.listbox.size() curItem = self.listbox.curselection() if curItem == (): curItem = -1 else: curItem = int(curItem[0]) if direction == "UP" and curItem > 0: curItem -= 1 elif direction == "UP" and curItem <= 0: curItem = numItems - 1 elif direction == "DOWN" and curItem < numItems - 1: curItem += 1 elif direction == "DOWN" and curItem == numItems - 1: curItem = 0 self.selectItem(curItem) # stop the event propgating return "break" # function to select the specified item def selectItem(self, position): numItems = self.listbox.size() self.listbox.selection_clear(0, numItems - 1) self.listbox.see(position) # Scroll! self.listbox.selection_set(first=position) self.listbox.activate(position) ##################################### # Named classes for containing groups ##################################### class LabelBox(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) self.theLabel = None self.theWidget = None class WidgetBox(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) self.theWidgets = [] class ListBox(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) class Pane(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) ##################################### # scrollable frame... # http://effbot.org/zone/tkinter-autoscrollbar.htm ##################################### class AutoScrollbar(Scrollbar): def __init__(self, parent, **opts): Scrollbar.__init__(self, parent, **opts) # a scrollbar that hides itself if it's not needed # only works if you use the grid geometry manager def set(self, lo, hi): if float(lo) <= 0.0 and float(hi) >= 1.0: # grid_remove is currently missing from Tkinter! self.tk.call("grid", "remove", self) else: self.grid() Scrollbar.set(self, lo, hi) def pack(self, **kw): raise Exception("cannot use pack with this widget") def place(self, **kw): raise Exception("cannot use place with this widget") ####################### # Widget to look like a label, but allow selection... 
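# Illustrative sketch (comment only) of using the SelectableLabel below; it
# assumes a Tk root and that this module's names are in scope. Because the
# Entry stays in 'readonly' state, the text is supplied via a StringVar:
#
#     root = Tk()
#     var = StringVar(root, value="this text can be selected and copied")
#     lbl = SelectableLabel(root)
#     lbl.config(textvariable=var)
#     lbl.pack(fill="x")
#     root.mainloop()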
####################### class SelectableLabel(Entry): def __init__(self, parent, **opts): Entry.__init__(self, parent) self.configure(relief=FLAT, state="readonly", readonlybackground='white', fg='black') # var = parent.StringVar() # self.configure(textvariable=var) ####################### # Frame with built in scrollbars and canvas for placing stuff on # http://effbot.org/zone/tkinter-autoscrollbar.htm # Modified with help from idlelib TreeWidget.py ####################### class ScrollPane(Frame): def __init__(self, parent, **opts): Frame.__init__(self, parent) self.config(padx=5, pady=5) # make the ScrollPane expandable self.grid_rowconfigure(0, weight=1) self.grid_columnconfigure(0, weight=1) vscrollbar = Scrollbar(self) hscrollbar = Scrollbar(self, orient=HORIZONTAL) opts['yscrollcommand'] = vscrollbar.set opts['xscrollcommand'] = hscrollbar.set self.canvas = Canvas(self, **opts) self.canvas.config(highlightthickness=0) vscrollbar.grid(row=0, column=1, sticky=N + S + E) hscrollbar.grid(row=1, column=0, sticky=E + W + S) self.canvas.grid(row=0, column=0, sticky=N + S + E + W) vscrollbar.config(command=self.canvas.yview) hscrollbar.config(command=self.canvas.xview) self.canvas.bind("<Key-Prior>", self.__keyPressed) self.canvas.bind("<Key-Next>", self.__keyPressed) self.canvas.bind("<Key-Up>", self.__keyPressed) self.canvas.bind("<Key-Down>", self.__keyPressed) self.canvas.bind("<Key-Left>", self.__keyPressed) self.canvas.bind("<Key-Right>", self.__keyPressed) self.canvas.bind("<Home>", self.__keyPressed) self.canvas.bind("<End>", self.__keyPressed) self.canvas.bind("<Enter>", self.__mouseEnter) self.canvas.bind("<Leave>", self.__mouseLeave) self.b_ids = [] self.canvas.focus_set() self.interior = Frame(self.canvas) self.interior_id = self.canvas.create_window( 0, 0, window=self.interior, anchor=NW) self.interior.bind('<Configure>', self.__configureInterior) # track changes to the canvas and frame width and sync them, # http://www.scriptscoop2.com/t/35d742299f35/python-tkinter-scrollbar-for-frame.html def __configureInterior(self, event): size = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight()) self.canvas.config(scrollregion="0 0 %s %s" % size) # unbind any saved bind ids def __unbindIds(self): if len(self.b_ids) == 0: return if gui.GET_PLATFORM() == gui.LINUX: self.canvas.unbind("<4>", self.b_ids[0]) self.canvas.unbind("<5>", self.b_ids[1]) self.canvas.unbind("<Shift-4>", self.b_ids[2]) self.canvas.unbind("<Shift-5>", self.b_ids[3]) else: # Windows and MacOS self.canvas.unbind("<MouseWheel>", self.b_ids[0]) self.canvas.unbind("<Shift-MouseWheel>", self.b_ids[1]) self.b_ids = [] # bind mouse scroll to this widget only when mouse is over def __mouseEnter(self, event): self.__unbindIds() if gui.GET_PLATFORM() == gui.LINUX: self.b_ids.append(self.canvas.bind_all("<4>", self.__vertMouseScroll)) self.b_ids.append(self.canvas.bind_all("<5>", self.__vertMouseScroll)) self.b_ids.append(self.canvas.bind_all("<Shift-4>", self.__horizMouseScroll)) self.b_ids.append(self.canvas.bind_all("<Shift-5>", self.__horizMouseScroll)) else: # Windows and MacOS self.b_ids.append(self.canvas.bind_all("<MouseWheel>", self.__vertMouseScroll)) self.b_ids.append(self.canvas.bind_all("<Shift-MouseWheel>", self.__horizMouseScroll)) # remove mouse scroll binding, when mouse leaves def __mouseLeave(self, event): self.__unbindIds() def __horizMouseScroll(self, event): self.__mouseScroll(True, event) def __vertMouseScroll(self, event): self.__mouseScroll(False, event) def __mouseScroll(self, horiz, event): 
direction = 0 # get direction if event.num == 4: direction = -1 elif event.num == 5: direction = 1 elif event.delta > 100: direction = int(-1 * (event.delta/120)) elif event.delta > 0: direction = -1 * event.delta elif event.delta < -100: direction = int(-1 * (event.delta/120)) elif event.delta < 0: direction = -1 * event.delta else: return # shouldn't happen if horiz: self.canvas.xview_scroll(direction, "units") else: self.canvas.yview_scroll(direction, "units") def getPane(self): return self.canvas def __keyPressed(self, event): # work out if alt/ctrl/shift are pressed state = event.state ctrl = (state & 0x4) != 0 alt = (state & 0x8) != 0 or (state & 0x80) != 0 # buggy shift = (state & 0x1) != 0 if event.type == "2": # up and down arrows if event.keysym == "Up": # event.keycode == 38 if ctrl: self.canvas.yview_scroll(-1, "pages") else: self.canvas.yview_scroll(-1, "units") elif event.keysym == "Down": # event.keycode == 40 if ctrl: self.canvas.yview_scroll(1, "pages") else: self.canvas.yview_scroll(1, "units") # left and right arrows elif event.keysym == "Left": # event.keycode == 37 if ctrl: self.canvas.xview_scroll(-1, "pages") else: self.canvas.xview_scroll(-1, "units") elif event.keysym == "Right": # event.keycode == 39 if ctrl: self.canvas.xview_scroll(1, "pages") else: self.canvas.xview_scroll(1, "units") # page-up & page-down keys elif event.keysym == "Prior": # event.keycode == 33 if ctrl: self.canvas.xview_scroll(-1, "pages") else: self.canvas.yview_scroll(-1, "pages") elif event.keysym == "Next": # event.keycode == 34 if ctrl: self.canvas.xview_scroll(1, "pages") else: self.canvas.yview_scroll(1, "pages") # home & end keys elif event.keysym == "Home": # event.keycode == 36 if ctrl: self.canvas.xview_moveto(0.0) else: self.canvas.yview_moveto(0.0) elif event.keysym == "End": # event.keycode == 35 if ctrl: self.canvas.xview_moveto(1.0) else: self.canvas.yview_moveto(1.0) return "break" else: pass # shouldn't happen ################################# # Additional Dialog Classes ################################# # the main dialog class to be extended class Dialog(Toplevel): def __init__(self, parent, title=None): Toplevel.__init__(self, parent) self.transient(parent) parent.POP_UP = self if title: self.title(title) self.parent = parent self.result = None # create a frame to hold the contents body = Frame(self) self.initial_focus = self.body(body) body.pack(padx=5, pady=5) # create the buttons self.buttonbox() self.grab_set() if not self.initial_focus: self.initial_focus = self self.protocol("WM_DELETE_WINDOW", self.cancel) gui.CENTER(self) self.initial_focus.focus_set() self.wait_window(self) # override to create the contents of the dialog # should return the widget to give focus to def body(self, master): pass # add standard buttons # override if you don't want the standard buttons def buttonbox(self): box = Frame(self) w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE) w.pack(side=LEFT, padx=5, pady=5) w = Button(box, text="Cancel", width=10, command=self.cancel) w.pack(side=LEFT, padx=5, pady=5) self.bind("<Return>", self.ok) self.bind("<Escape>", self.cancel) box.pack() # called when ok button pressed def ok(self, event=None): # only continue if validate() returns True if not self.validate(): self.initial_focus.focus_set() # put focus back return self.withdraw() self.update_idletasks() # call the validate function before calling the cancel function self.apply() self.cancel() # called when cancel button pressed def cancel(self, event=None): 
self.parent.focus_set() # give focus back to the parent self.destroy() # override this to cancel closing the form def validate(self): return True # override this to do something before closing def apply(self): pass # a base class for a simple data capture dialog class SimpleEntryDialog(Dialog): def __init__(self, parent, title, question): self.error = False self.question = question if PYTHON2: Dialog.__init__(self, parent, title) else: super(SimpleEntryDialog, self).__init__(parent, title) def clearError(self, e): if self.error: self.error = False self.l1.config(text="") def setError(self, message): self.parent.bell() self.error = True self.l1.config(text=message) # a label for the question, an entry for the answer # a label for an error message def body(self, master): Label(master, text=self.question).grid(row=0) self.e1 = Entry(master) self.l1 = Label(master, fg="red") self.e1.grid(row=1) self.l1.grid(row=2) self.e1.bind("<Key>", self.clearError) return self.e1 # captures a string - must not be empty class TextDialog(SimpleEntryDialog): def __init__(self, parent, title, question): if PYTHON2: SimpleEntryDialog.__init__(self, parent, title, question) else: super(TextDialog, self).__init__(parent, title, question) def validate(self): res = self.e1.get() if len(res.strip()) == 0: self.setError("Invalid text.") return False else: self.result = res return True # captures a number - must be a valid float class NumDialog(SimpleEntryDialog): def __init__(self, parent, title, question): if PYTHON2: SimpleEntryDialog.__init__(self, parent, title, question) else: super(NumDialog, self).__init__(parent, title, question) def validate(self): res = self.e1.get() try: self.result = float(res) if '.' in res else int(res) return True except ValueError: self.setError("Invalid number.") return False ##################################### # Toplevel Stuff ##################################### class SubWindow(Toplevel): def __init__(self): Toplevel.__init__(self) self.escapeBindId = None # used to exit fullscreen self.stopFunction = None # used to stop self.geometry("+%d+%d" % (100, 100)) # removed for python2.7 # def __getattr__(self, name): # def handlerFunction(*args, **kwargs): # print("Unknown function:", name, args, kwargs) # return handlerFunction ##################################### # SimpleGrid Stuff ##################################### # first row is used as a header class SimpleGrid(Frame): def config(self, cnf=None, **kw): self.configure(cnf, **kw) def configure(self, cnf=None, **kw): kw = gui.CLEAN_CONFIG_DICTIONARY(**kw) if "bg" in kw: self.mainCanvas.config(bg=kw["bg"]) if "activebackground" in kw: self.cellSelectedBg = kw.pop("activebackground") if "inactivebackground" in kw: self.cellBg = kw.pop("inactivebackground") if "font" in kw: font = kw.pop("font") self.gdFont.configure( family=font.actual("family"), size=font.actual("size")) self.ghFont.configure( family=font.actual("family"), size=font.actual("size") + 2, weight="bold") if "buttonFont" in kw: buttonFont = kw.pop("buttonFont") self.buttonFont.configure( family=buttonFont.actual("family"), size=buttonFont.actual("size")) def __init__(self, parent, title, data, action=None, addRow=False, **opts): if "buttonFont" in opts: self.buttonFont = opts.pop("buttonFont") else: self.buttonFont = font.Font(family="Helvetica", size=12) Frame.__init__(self, parent, **opts) if "font" in opts: self.gdFont = opts["font"] self.ghFont = opts["font"] self.ghFont.configure( size=self.ghFont.actual("size") + 2, weight="bold") else: self.gdFont = 
font.Font(family="Helvetica", size=12) self.ghFont = font.Font(family="Helvetica", size=14, weight="bold") # store them in the frame object for access, later self.action = action self.entries = [] self.numColumns = 0 self.numRows = len(data) # find out the max number of cells in a row for rowNum in range(self.numRows): if len(data[rowNum]) > self.numColumns: self.numColumns = len(data[rowNum]) # a list of any selected cells from collections import OrderedDict self.selectedCells = OrderedDict() # colours self.cellHeadingBg = "DarkGray" # HEADING BG self.cellBg = "LightCyan" # CELL BG self.cellOverBg = "Silver" # mouse over BG self.cellSelectedBg = "LightGray" # selected cell BG # add a canvas for scrolling self.mainCanvas = Canvas( self, borderwidth=0, highlightthickness=2, bg=self.cget("bg")) vsb = Scrollbar(self, orient="vertical", command=self.mainCanvas.yview) hsb = Scrollbar( self, orient="horizontal", command=self.mainCanvas.xview) # pack them in vsb.pack(side="right", fill="y") hsb.pack(side="bottom", fill="x") self.mainCanvas.pack(side="left", fill="both", expand=True) # add the grid cpntainer to the frame self.gridContainer = Frame(self.mainCanvas) self.mainCanvas.create_window( (4, 4), window=self.gridContainer, anchor="nw", tags="self.gridContainer") self.gridContainer.bind("<Configure>", self.__refreshGrids) # configure scrollCommands self.mainCanvas.configure(yscrollcommand=vsb.set) self.mainCanvas.configure(xscrollcommand=hsb.set) # bind scroll events if gui.GET_PLATFORM() == gui.LINUX: self.mainCanvas.bind_all( "<4>", lambda event, arg=title: self.__scrollGrid( event, arg)) self.mainCanvas.bind_all( "<5>", lambda event, arg=title: self.__scrollGrid( event, arg)) else: # Windows and MacOS self.mainCanvas.bind_all( "<MouseWheel>", lambda event, arg=title: self.__scrollGrid( event, arg)) self.__addRows(data, addRow) def __addRows(self, data, addEntryRow=False): # loop through each row for rowNum in range(self.numRows): self.__addRow(rowNum, data[rowNum]) # add a row of entry boxes... 
if addEntryRow: self.__addEntryBoxes() def addRow(self, rowData): self.__removeEntryBoxes() self.__addRow(self.numRows, rowData) self.numRows += 1 self.__addEntryBoxes() def __addRow(self, rowNum, rowData): celContents = [] # then the cells in that row for cellNum in range(self.numColumns): # get a val ("" if no val) if cellNum >= len(rowData): val = "" else: val = rowData[cellNum] celContents.append(val) lab = Label(self.gridContainer) if rowNum == 0: lab.configure( relief=RIDGE, text=val, font=self.ghFont, background=self.cellHeadingBg) else: lab.configure( relief=RIDGE, text=val, font=self.gdFont, background=self.cellBg) lab.bind("<Enter>", self.__gridCellEnter) lab.bind("<Leave>", self.__gridCellLeave) lab.bind("<Button-1>", self.__gridCellClick) gridPos = str(rowNum - 1) + "-" + str(cellNum) self.selectedCells[gridPos] = False lab.gridPos = gridPos lab.grid(row=rowNum, column=cellNum, sticky=N + E + S + W) Grid.columnconfigure(self.gridContainer, cellNum, weight=1) Grid.rowconfigure(self.gridContainer, rowNum, weight=1) # add some buttons for each row if self.action is not None: widg = Label(self.gridContainer, relief=RIDGE, height=2) # add the title if rowNum == 0: widg.configure( text="Action", font=self.ghFont, background=self.cellHeadingBg) # add a button else: but = Button( widg, font=self.buttonFont, text="Press", command=gui.MAKE_FUNC( self.action, celContents)) but.place(relx=0.5, rely=0.5, anchor=CENTER) widg.grid(row=rowNum, column=cellNum + 1, sticky=N + E + S + W) def __removeEntryBoxes(self): for e in self.entries: e.lab.grid_forget() e.place_forget() self.ent_but.lab.grid_forget() self.ent_but.place_forget() def __addEntryBoxes(self): self.entries = [] for cellNum in range(self.numColumns): name = "GR" + str(cellNum) lab = Label(self.gridContainer, relief=RIDGE, width=6, height=2) lab.grid(row=self.numRows, column=cellNum, sticky=N + E + S + W) # self.__buildEntry(name, self.gridContainer) ent = Entry(lab, width=5) ent.place(relx=0.5, rely=0.5, anchor=CENTER) self.entries.append(ent) ent.lab = lab lab = Label(self.gridContainer, relief=RIDGE, height=2) lab.grid( row=self.numRows, column=self.numColumns, sticky=N + E + S + W) self.ent_but = Button( lab, font=self.buttonFont, text="Press", command=gui.MAKE_FUNC( self.action, "newRow")) self.ent_but.lab = lab self.ent_but.place(relx=0.5, rely=0.5, anchor=CENTER) def getEntries(self): return [e.get() for e in self.entries] def getSelectedCells(self): return dict(self.selectedCells) # function to scroll the canvas/scrollbars # gets the requested grid # and checks the event.delta to determine where to scroll # https://www.daniweb.com/programming/software-development/code/217059/using-the-mouse-wheel-with-tkinter-python def __scrollGrid(self, event, title): if gui.GET_PLATFORM() in [gui.WINDOWS, gui.MAC]: if gui.GET_PLATFORM() == gui.WINDOWS: val = event.delta / 120 else: val = event.delta val = val * -1 if event.delta in [1, -1]: self.mainCanvas.yview_scroll(val, "units") elif event.delta in [2, -2]: self.mainCanvas.xview_scroll(val, "units") elif gui.GET_PLATFORM() == gui.LINUX: if event.num == 4: self.mainCanvas.yview_scroll(-1 * 2, "units") elif event.num == 5: self.mainCanvas.yview_scroll(2, "units") def __refreshGrids(self, event): '''Reset the scroll region to encompass the inner frame''' self.mainCanvas.configure(scrollregion=self.mainCanvas.bbox("all")) #can.itemconfig(_id, height=frame.mainCanvas.height, width=frame.mainCanvas.width) def __gridCellEnter(self, event): cell = event.widget 
cell.config(background=self.cellOverBg) def __gridCellLeave(self, event): cell = event.widget gridPos = cell.gridPos if self.selectedCells[gridPos]: cell.config(background=self.cellSelectedBg) else: cell.config(background=self.cellBg) def __gridCellClick(self, event): cell = event.widget gridPos = cell.gridPos if self.selectedCells[gridPos]: self.selectedCells[gridPos] = False cell.config(background=self.cellBg) else: self.selectedCells[gridPos] = True cell.config(background=self.cellSelectedBg) ########################## # Simple SplashScreen ########################## class SplashScreen(Toplevel): def __init__(self, parent, text="appJar", fill="red", stripe="black", fg="white", font=44): Toplevel.__init__(self, parent) lab = Label(self, bg=stripe, fg=fg, text=text, height=3, width=50) lab.config(font=("Courier", font)) lab.place(relx=0.5, rely=0.5, anchor=CENTER) width = str(self.winfo_screenwidth()) height = str(self.winfo_screenheight()) self.geometry(width+"x"+height) self.config(bg=fill) self.attributes("-alpha", 0.95) self.attributes("-fullscreen", True) self.overrideredirect(1) self.update() ########################## # CopyAndPaste Organiser ########################## class CopyAndPaste(): def __init__(self, topLevel): self.topLevel = topLevel self.inUse = False def setUp(self, widget): self.inUse = True # store globals self.widget = widget self.widgetType = widget.__class__.__name__ # query widget self.canCut = False self.canCopy = False self.canSelect = False self.canUndo = False self.canRedo = False try: self.canPaste = len(self.topLevel.clipboard_get()) > 0 except: self.canPaste = False if self.widgetType in ["Entry", "AutoCompleteEntry"]: if widget.selection_present(): self.canCut = self.canCopy = True if widget.index(END) > 0: self.canSelect = True elif self.widgetType in ["ScrolledText", "Text"]: if widget.tag_ranges("sel"): self.canCut = self.canCopy = True if widget.index("end-1c") != "1.0": self.canSelect = True if widget.edit_modified(): self.canUndo = True self.canRedo = True elif self.widgetType == "OptionMenu": self.canCopy = True self.canPaste = False def copy(self): if self.widgetType == "OptionMenu": self.topLevel.clipboard_clear() self.topLevel.clipboard_append(self.widget.var.get()) else: self.widget.event_generate('<<Copy>>') self.widget.selection_clear() def cut(self): if self.widgetType == "OptionMenu": self.topLevel.bell() else: self.widget.event_generate('<<Cut>>') self.widget.selection_clear() def paste(self): self.widget.event_generate('<<Paste>>') self.widget.selection_clear() def undo(self): try: self.widget.edit_undo() except: self.topLevel.bell() def redo(self): try: self.widget.edit_redo() except: self.topLevel.bell() def clearClipboard(self): self.topLevel.clipboard_clear() def clearText(self): try: self.widget.delete(0.0, END) # TEXT except: try: self.widget.delete(0, END) # ENTRY except: self.topLevel.bell() def selectAll(self): try: self.widget.select_range(0, END) # ENTRY except: try: self.widget.tag_add("sel", "1.0", "end") # TEXT except: self.topLevel.bell() # clear the undo/redo stack def resetStack(self): self.widget.edit_reset() ##################################### # MAIN - for testing ##################################### if __name__ == "__main__": print("This is a library class and cannot be executed.") sys.exit()
unlicense
2,429,787,942,709,151,000
33.04962
134
0.527928
false
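Usage sketch for the widget classes in the record above (they read like appJar-style helpers). AutoCompleteEntry is the only class in this excerpt with no dependency on the surrounding gui object, so a plain Tkinter root is enough; the import name below is hypothetical, not part of the original file.

# sketch only - assumes the classes above are importable; "appjar_widgets"
# is a placeholder module name and must be replaced with the real file name
try:
    from tkinter import Tk        # Python 3
except ImportError:
    from Tkinter import Tk        # Python 2
from appjar_widgets import AutoCompleteEntry   # hypothetical module name

root = Tk()
words = ["apple", "apricot", "banana", "blueberry", "cherry"]
# word list first, then the usual Tkinter Entry arguments (master, options)
entry = AutoCompleteEntry(words, root, width=30)
entry.pack(padx=10, pady=10)
root.mainloop()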
albugrimenko/Python_Pieces
tools/tools_ml.py
1
5192
""" Basic functions for data preparation and results analysis in ML. A general purpose functions for converting data from the dictionary format to an (n x k) python list that's ready for training an sklearn algorithm - sklearn loves to work with numpy arrays. n--no. of key-value pairs in dictonary k--no. of features being extracted Input data file structure: - first column is a label - last column is a note (will be ignored) - everything in a middle are features Example: Label,Data,Note @author: Alex Bugrimenko """ # from sklearn.preprocessing import MinMaxScaler # from sklearn.preprocessing import StandardScaler from tools.tools_data import audit_get_counts def data_split(data, prob): """ Splits data into fractions [prob, 1 - prob] """ import random results = [], [] for row in data: results[0 if random.random() < prob else 1].append(row) return results def data_train_test_split_simple(features, labels, test_size, is_print=False): """ Splits data into training and test sets test_pct - percent of data returned in a test array """ data = zip(features, labels) train, test = data_split(data, 1 - test_size) f_train, l_train = zip(*train) f_test, l_test = zip(*test) if is_print: print("--- Train/test split results ---") print("-- # features:", len(features[0])) print("-- # items in train:", len(l_train)) audit_get_counts(l_train, is_print=True) print("-- # items in test:", len(l_test)) audit_get_counts(l_test, is_print=True) return f_train, f_test, l_train, l_test def get_train_test_split(features, labels, test_size=0.3, random_state=0, is_print=False): """ Gets rescaled and ready for classifiers data: - performs train/test split using specified test_size - prints train/test stats if is_print = True """ from sklearn.model_selection import train_test_split # get train/test split f_train, f_test, l_train, l_test = train_test_split(features, labels, test_size=test_size, random_state=random_state) if is_print: print("--- Train/test split results ---") print("-- # features:", len(features[0])) print("-- # items in train:", len(l_train)) audit_get_counts(l_train, is_print=True) print("-- # items in test:", len(l_test)) audit_get_counts(l_test, is_print=True) return f_train, f_test, l_train, l_test def scale_feature(arr): """ Rescales all values within array to be in range [0..1] When all values are identical: assign each new feature to 0.5 (halfway between 0.0 and 1.0) """ xmin = min(arr) xmax = max(arr) res = [] if xmin == xmax: res = [.5 for _ in arr] else: res = [float((x - xmin)) / (xmax - xmin) for x in arr] return res def data_feature_label_split(data, is_rescale_required=True, is_print=False, is_skip_first=False): """ Separates out the labels, features and notes and put it into separate lists. 
Standardize features by removing the mean and scaling to unit variance when is_rescale_required=True """ labels = [] features = [] notes = [] i = 0 for item in data: # skip the header if len(item) == 0 or (i == 0 and is_skip_first): i = 1 continue labels.append(item[0]) features.append(item[1:-1]) notes.append(item[-1]) if is_print: print("----- Data Feature-Label split -----") print("--- # features:", len(features[0])) print("--- # samples:", len(features)) print("--- # samples by label:") audit_get_counts(labels, is_print=True) if is_rescale_required: # rescale to [0..1] # scaler = StandardScaler() # MinMaxScaler() # features = scaler.fit_transform(X=features) features = scale_feature() if is_print: print("--+ features have been re-scaled to [0..1]") return features, labels, notes def print_feature_importances(feature_importances): cnt = 0 for i, f in enumerate(feature_importances): if f > 0: cnt += 1 print("feature %d (%f)" % (i, f)) print("Total # features used:", cnt) def print_feature_importances_top(feature_importances, feature_names=None, top=10): if top < 1: top = 10 cnt = 0 if feature_names is None or len(feature_names) != len(feature_importances): feature_names = [] for i, _ in enumerate(feature_importances): feature_names.append("Feature {0}".format(i)) importances = zip(feature_names, feature_importances) importances = sorted(importances, key=lambda x: x[1], reverse=True) for i, f in enumerate(importances): if f[1] > 0: cnt += 1 if i <= top: print("feature %d: [%s] (%f)" % (i, f[0], f[1])) print("Total # features used:", cnt) # ---------- main calls ------------- if __name__ == "__main__": print("~~~ There is no Main method defined. ~~~")
mit
-4,066,301,384,943,164,000
33.845638
111
0.596302
false
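A minimal usage sketch for the helpers above, not part of the original file: rows follow the documented Label,...,Note layout, and is_print stays False so the example does not need tools.tools_data.audit_get_counts; the import path mirrors the tools/tools_ml.py path shown.

# sketch only - toy rows in the documented Label,Data...,Note layout
from tools.tools_ml import data_feature_label_split, get_train_test_split

rows = [
    ["spam", 1.0, 10.0, "note-1"],
    ["ham", 2.0, 20.0, "note-2"],
    ["spam", 3.0, 30.0, "note-3"],
    ["ham", 4.0, 40.0, "note-4"],
]
features, labels, notes = data_feature_label_split(rows, is_rescale_required=True)
f_train, f_test, l_train, l_test = get_train_test_split(features, labels, test_size=0.25)
print(len(f_train), len(f_test))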
Vritrahan/HarrisSimilarity
harris.py
1
2028
import cv2
import numpy as np
import copy
import sys

try:
    img1 = cv2.imread(sys.argv[1])
    img2 = cv2.imread(sys.argv[2])
except Exception as e:
    print "MOAR arguments"
    sys.exit()

# print type(img1)
img_orig1 = copy.copy(img1)
img_orig2 = copy.copy(img2)

gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# must give a float32 data type input
gray1 = np.float32(gray1)
dst1 = cv2.cornerHarris(gray1, 2, 3, 0.04)
# cv2.cornerHarris(src, blockSize, ksize, k) -> dst
# src - Input single-channel 8-bit or floating-point image.
# dst - Image to store the Harris detector responses. It has the type CV_32FC1 and the same size as src .
# blockSize - Neighborhood size.
# ksize - Aperture parameter for the Sobel() operator.
# k - Harris detector free parameter.

gray2 = np.float32(gray2)
dst2 = cv2.cornerHarris(gray2, 2, 3, 0.04)
# print type(dst1)

# result is dilated for marking the corners, not important
dst1 = cv2.dilate(dst1, None)
dst2 = cv2.dilate(dst2, None)
# cv2.imshow('Harris output', dst)

# Threshold for an optimal value, it may vary depending on the image.
img1[dst1 > 0.001 * dst1.max()] = [0, 0, 255]
img2[dst2 > 0.001 * dst2.max()] = [0, 0, 255]

dst1[dst1 < 0.01 * dst1.max()] = 0
dst2[dst2 < 0.01 * dst2.max()] = 0

# find the first corner response in each image and use the offset between
# them to align the two response maps; the for/else pattern breaks out of
# both loops at the first hit
for i in range(0, dst1.shape[0]):
    for j in range(0, dst1.shape[1]):
        if dst1[i][j] > 0:
            x = i
            y = j
            break
    else:
        continue
    break

for i in range(0, dst2.shape[0]):
    for j in range(0, dst2.shape[1]):
        if dst2[i][j] > 0:
            x = x - i
            y = y - j
            break
    else:
        continue
    break

dst2 = np.roll(np.roll(dst2, x, 0), y, 1)

diff = np.sqrt(abs(dst1 ** 2 - dst2 ** 2))
diss = np.sum(diff) / (dst2.shape[0] * dst2.shape[1])
print "Dissimilarity: " + str(diss)
if diss > 100000:
    print "Images are dissimilar"
else:
    print "Images are similar"

cv2.imshow('Corners #1', img1)
cv2.imshow('Corners #2', img2)
cv2.imshow('Difference', diff)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
gpl-3.0
-2,987,746,590,666,416,600
27.56338
105
0.639546
false
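The script above is command-line driven (two image paths as arguments). The sketch below, not part of the original file, wraps the same Harris-response comparison in a reusable function; it assumes both images have identical dimensions and omits the corner-alignment roll step for brevity.

# sketch only - same Harris dissimilarity measure, without the CLI/GUI parts
import cv2
import numpy as np

def harris_dissimilarity(path1, path2):
    responses = []
    for path in (path1, path2):
        gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        dst = cv2.dilate(cv2.cornerHarris(np.float32(gray), 2, 3, 0.04), None)
        dst[dst < 0.01 * dst.max()] = 0   # keep only strong corner responses
        responses.append(dst)
    dst1, dst2 = responses
    diff = np.sqrt(abs(dst1 ** 2 - dst2 ** 2))
    return np.sum(diff) / (dst2.shape[0] * dst2.shape[1])

# print(harris_dissimilarity("a.png", "b.png"))  # hypothetical image paths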
joshp123/genx
models/lib/neutron_refl.py
3
9416
''' Library for reflectivity calculations with neutrons. Programmed by Matts Bjorck Last changed 2011-06-11 ''' from numpy import * import math_utils as mu def make_D(q_p,q_m): return mat([[1,1,0,0],[q_p,-q_p,0,0],[0,0,1,1],[0,0,q_m,-q_m]]) def make_P(q_p,q_m,d): return mat([[exp(-1.0j*q_p*d),0,0,0],[0,exp(1.0j*q_p*d),0,0],\ [0,0,exp(-1.0j*q_m*d),0],[0,0,0,exp(1.0j*q_m*d)]]) def make_R(theta_diff): ct=cos(theta_diff/2.0) st=sin(theta_diff/2.0) return mat([[ct,0,st,0],[0,ct,0,st],[-st,0,ct,0],[0,-st,0,ct]]) def make_Sigma(q_p,q_m,sigma): ep=exp(-q_p**2*sigma**2) em=exp(-q_m**2*sigma**2) return mat([[ep,0,0,0],[0,1,0,0],[0,0,em,0],[0,0,0,1]]) def ReflOld(Q,Vp,Vm,d,M_ang): ''' Calculates spin-polarized reflectivity according to S.J. Blundell and J.A.C. Bland Phys rev. B. vol 46 3391 (1992) Input parameters: Q : Scattering vector in reciprocal angstroms Q=4*pi/lambda *sin(theta) Vp: Neutron potential for spin up Vm: Neutron potential for spin down d: layer thickness M_ang: Angle of the magnetic moment(radians!) M_ang=0 =>M//nuetron spin No roughness is included! Returns: (Ruu,Rdd,Rud,Rdu) (up-up,down-down,up-down,down-up) ''' # Assume first element=substrate and last=ambient! Q=Q/2.0 # Wavevectors in the layers Qi_p=sqrt(Q[:,newaxis]**2-Vp) Qi_m=sqrt(Q[:,newaxis]**2-Vm) #print Qi_p.shape #print d.shape #print M_ang.shape #M_ang=M_ang[::-1] #d=d[::-1] #Angular difference between the magnetization theta_diff=M_ang[1:]-M_ang[:-1] #Unsure but think this is correct #theta_diff=theta_diff[::-1] #print theta_diff def calc_mat(q_p,q_m,d,theta_diff): return make_D(q_p,q_m)*make_P(q_p,q_m,d)*make_D(q_p,q_m)**-1*make_R(theta_diff) #Calculate the transfer matrix - this implementation is probably REALLY slow... M=[make_D(qi_p[-1],qi_m[-1])**-1*make_R(theta_diff[-1])*reduce(lambda x,y:y*x,[calc_mat(q_p,q_m,di,theta_diffi) for q_p,q_m,di,theta_diffi in zip(qi_p[1:-1],qi_m[1:-1],d[1:-1],theta_diff[:-1])] ,make_D(qi_p[0],qi_m[0])) for qi_p,qi_m in zip(Qi_p,Qi_m)] #M=[make_D(qi_p[0],qi_m[0])**-1*make_R(theta_diff[0])*reduce(lambda x,y:y*x,[calc_mat(q_p,q_m,di,theta_diffi) # for q_p,q_m,di,theta_diffi in zip(qi_p[1:-1],qi_m[1:-1],d[1:-1],theta_diff[1:])] # ,make_D(qi_p[-1],qi_m[-1])) # for qi_p,qi_m in zip(Qi_p,Qi_m)] #print 'Matrix calculated' #print M[0] # transform M into an array - for fast indexing M=array([array(m) for m in M]) #print M.shape Ruu=(M[:,1,0]*M[:,2,2]-M[:,1,2]*M[:,2,0])/(M[:,0,0]*M[:,2,2]-M[:,0,2]*M[:,2,0]) Rud=(M[:,3,0]*M[:,2,2]-M[:,3,2]*M[:,2,0])/(M[:,0,0]*M[:,2,2]-M[:,0,2]*M[:,2,0]) Rdu=(M[:,1,2]*M[:,0,0]-M[:,1,0]*M[:,0,2])/(M[:,0,0]*M[:,2,2]-M[:,0,2]*M[:,2,0]) Rdd=(M[:,3,2]*M[:,0,0]-M[:,3,0]*M[:,0,2])/(M[:,0,0]*M[:,2,2]-M[:,0,2]*M[:,2,0]) #print 'Reflectivites calculated' return abs(Ruu)**2,abs(Rdd)**2,abs(Rud)**2,abs(Rdu)**2 #====================================================================== # New quicker way of doing spin pol. calcs. ctype = complex128 def ass_X(k_p, k_m, theta_diff): ''' Make the interface transmission matrix for neutron reflection from a interface. 
''' k_pj1 = k_p[:, :-1] k_pj = k_p[:, 1:] k_mj1 = k_m[:, :-1] k_mj = k_m[:, 1:] costd = cos(theta_diff/2.0) sintd = sin(theta_diff/2.0) X = zeros((4,4) + k_pj.shape, dtype = ctype) X[0,0] = costd*(k_pj1 + k_pj)/2/k_pj X[0,1] = -costd*(k_pj1 - k_pj)/2/k_pj X[0,2] = sintd*(k_pj + k_mj1)/2/k_pj X[0,3] = sintd*(k_pj - k_mj1)/2/k_pj #X[0] = X[0]/2/k_pj X[1,0] = X[0,1]#-(costd*(k_pj1 - k_pj))/(2*k_pj) X[1,1] = X[0,0]#(costd*(k_pj1 + k_pj))/(2*k_pj) X[1,2] = X[0,3]#(sintd*(k_pj - k_mj1))/(2*k_pj) X[1,3] = X[0,2]#(sintd*(k_pj + k_mj1))/(2*k_pj) X[2,0] = -(sintd*(k_pj1 + k_mj))/(2*k_mj) X[2,1] = (sintd*(k_pj1 - k_mj))/(2*k_mj) X[2,2] = (costd*(k_mj1 + k_mj))/(2*k_mj) X[2,3] = -(costd*(k_mj1 - k_mj))/(2*k_mj) X[3,0] = X[2,1]#(sintd*(k_pj1 - k_mj))/(2*k_mj) X[3,1] = X[2,0]#-(sintd*(k_pj1 + k_mj))/(2*k_mj) X[3,2] = X[2,3]#-(costd*(k_mj1 - k_mj))/(2*k_mj) X[3,3] = X[2,2]#(costd*(k_mj1 + k_mj))/(2*k_mj) return X def gauss(q, sigma2): '''Fourier transform of the interface roughness weight function ''' return exp(-q**2*sigma2/2.0) def include_sigma(X, k_p, k_m, sigma, w = gauss): '''Function to include roughness into the interface matrix. ''' sigma2 = sigma[...,:-1]**2 k_pj1 = k_p[:, :-1] k_pj = k_p[:, 1:] k_mj1 = k_m[:, :-1] k_mj = k_m[:, 1:] X[0,0] = X[0,0]*w(k_pj - k_pj1, sigma2) X[0,1] = X[0,1]*w(k_pj + k_pj1, sigma2) X[0,2] = X[0,2]*w(k_pj - k_mj1, sigma2) X[0,3] = X[0,3]*w(k_pj + k_mj1, sigma2) X[1,0] = X[0,1]#X[1,0]*w(k_pj + k_pj1, sigma2) X[1,1] = X[0,0]#X[1,1]*w(k_pj - k_pj1, sigma2) X[1,2] = X[0,3]#X[1,2]*w(k_pj + k_mj1, sigma2) X[1,3] = X[0,2]#X[1,3]*w(k_pj - k_mj1, sigma2) X[2,0] = X[2,0]*w(k_mj - k_pj1, sigma2) X[2,1] = X[2,1]*w(k_mj + k_pj1, sigma2) X[2,2] = X[2,2]*w(k_mj - k_mj1, sigma2) X[2,3] = X[2,3]*w(k_mj + k_mj1, sigma2) X[3,0] = X[2,1]#X[3,0]*w(k_mj + k_pj1, sigma) X[3,1] = X[2,0]#X[3,1]*w(k_mj - k_pj1, sigma) X[3,2] = X[2,3]#X[3,2]*w(k_mj + k_mj1, sigma) X[3,3] = X[2,2]#X[3,3]*w(k_mj - k_mj1, sigma) return X def ass_P(k_p, k_m, d): ''' Make the layer proagation matrix for a layer. ''' P = zeros((4,4) + k_p.shape, dtype=ctype) P[0,0] = exp(-1.0J*k_p*d) P[1,1] = 1/P[0,0]#exp(1.0J*k_p*d) P[2,2] = exp(-1.0J*k_m*d) P[3,3] = 1/P[2,2]#exp(1.0J*k_m*d) return P def Refl(Q, Vp, Vm, d, M_ang, sigma = None): '''A quicker implementation than the ordinary slow implementaion in Refl Calculates spin-polarized reflectivity according to S.J. Blundell and J.A.C. Blnd Phys rev. B. vol 46 3391 (1992) The algorithm assumes that the first element in the arrays represents the substrate and the last the ambient layer. Input parameters: Q : Scattering vector in reciprocal angstroms Q=4*pi/lambda *sin(theta) Vp: Neutron potential for spin up Vm: Neutron potential for spin down d: layer thickness M_ang: Angle of the magnetic moment(radians!) M_ang=0 =>M//nuetron spin sigma: The roughness of the upper interface. Returns: (Ruu,Rdd,Rud,Rdu) (up-up,down-down,up-down,down-up) ''' # Assume first element=substrate and last=ambient! 
k_amb = Q[:, newaxis]/2.0 # Wavevectors in the layers k_p = sqrt(k_amb**2 - Vp).astype(complex128) k_m = sqrt(k_amb**2 - Vm).astype(complex128) #Angular difference between the magnetization theta_diff=M_ang[1:] - M_ang[:-1] #if sigma == None: # sigma = zeros(d.shape) # Assemble the interface reflectivity matrix X = ass_X(k_p, k_m, theta_diff) if sigma != None: X = include_sigma(X, k_p, k_m, sigma) # Assemble the layer propagation matrices P = ass_P(k_p, k_m, d) # Multiply the propagation matrices with the interface matrix PX = mu.dot4_Adiag(P[...,1:-1], X[...,:-1]) # Multiply up the sample matrix M = mu.dot4(X[...,-1], reduce(mu.dot4, rollaxis(PX, 3)[::-1])) #print M.shape denom = M[0,0]*M[2,2]-M[0,2]*M[2,0] Ruu = (M[1,0]*M[2,2]-M[1,2]*M[2,0])/denom Rud = (M[3,0]*M[2,2]-M[3,2]*M[2,0])/denom Rdu = (M[1,2]*M[0,0]-M[1,0]*M[0,2])/denom Rdd = (M[3,2]*M[0,0]-M[3,0]*M[0,2])/denom return abs(Ruu)**2,abs(Rdd)**2,abs(Rud)**2,abs(Rdu)**2 if __name__=='__main__': Q=arange(0.01,0.2,0.0005) sld_Fe=8e-6 sld_Fe_p=12.9e-6 sld_Fe_m=2.9e-6 sld_Pt=6.22e-6 def pot(sld): lamda=5.0 #return (2*pi/lamda)**2*(1-(1-lamda**2/2/pi*sld)**2) return sld/pi Vp=array([pot(sld_Pt),pot(sld_Fe_p),pot(sld_Pt),pot(sld_Fe_p),0]) Vm=array([pot(sld_Pt),pot(sld_Fe_m),pot(sld_Pt),pot(sld_Fe_m),0]) d=array([3,100,50,100,3]) M_ang=array([0.0,45*pi/180,0.0,90*pi/180,0.0,]) sigma=array([10.,10.,10.,10.,10.0])*0 import time t1 = time.time() for i in range(10): r = Refl(Q,Vp,Vm,d,M_ang, sigma) t2 = time.time() for i in range(10): r_orig = ReflOld(Q,Vp,Vm,d,M_ang) t3 = time.time() print 'Old version: ', t3 - t2 print 'New version: ', t2 - t1 print 'Speedup: ', (t3 - t2)/(t2 - t1) from pylab import * #plot(Q,log10(r[0]+1e-6),Q,log10(r[1]+1e-6),Q,log10(r[2]+1e-6),Q,log10(r[3]+1e-6)) #io.write_array(open('test.dat','w'),array(zip(Q,abs(r[0]),abs(r[1]),abs(r[2])))) for rc in r: plot(Q,log10(abs(rc))) for rc in r_orig: plot(Q,log10(abs(rc)),'.') print 'Done' show() if True: import profile profile.run('[Refl(Q,Vp,Vm,d,M_ang, sigma) for i in range(50)]')
gpl-3.0
4,929,292,113,587,073,000
38.233333
115
0.512744
false
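A usage sketch for Refl(), not part of the original file: a single magnetic layer between substrate and ambient, with the potentials built the same way as the module's own __main__ block (V = sld/pi). It assumes the file is importable as neutron_refl and, like the module itself, a Python 2 interpreter.

# sketch only - first array element is the substrate, last is the ambient
from numpy import array, arange, pi
import neutron_refl   # assumes models/lib is on the import path

Q = arange(0.01, 0.2, 0.001)
Vp = array([6.22e-6, 12.9e-6, 0.0]) / pi   # substrate, magnetic layer, ambient (spin up)
Vm = array([6.22e-6, 2.9e-6, 0.0]) / pi    # same stack, spin down
d = array([3.0, 100.0, 3.0])               # thicknesses in Angstrom
M_ang = array([0.0, 45 * pi / 180, 0.0])   # magnetization angles in radians
Ruu, Rdd, Rud, Rdu = neutron_refl.Refl(Q, Vp, Vm, d, M_ang)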
LeiQiao/keras2tensorflow
keras2tensorflow.py
1
9541
import tensorflow as tf import numpy as np import struct LAYER_CONV2D = 1 LAYER_ACTIVATION = 2 LAYER_MAXPOOL = 3 LAYER_FLATTEN = 4 LAYER_DENSE = 5 LAYER_DROPOUT = 6 ACTIVATION_UNKNOWN = 0 ACTIVATION_LINEAR = 1 ACTIVATION_RELU = 2 ACTIVATION_SOFTMAX = 3 ACTIVATION_TANH = 4 ##################################################################### def save_floats(file, floats): ''' Writes floats to file in 1024 chunks.. prevents memory explosion writing very large arrays to disk when calling struct.pack(). ''' step = 1024 written = 0 for i in np.arange(0, len(floats), step): remaining = min(len(floats) - i, step) written += remaining file.write(struct.pack('=%sf' % remaining, *floats[i:i+remaining])) assert written == len(floats) ##################################################################### def load_floats(file, count): assert False, "UNSUPPORT" ##################################################################### class keras_conv2d: weights = None biases = None dim_ordering = None def __init__(self, keras_layer): self.weights = keras_layer.get_weights()[0] self.biases = keras_layer.get_weights()[1] self.padding = 'VALID' if keras_layer.border_mode == "same": self.padding = 'SAME' self.dim_ordering = "NHWC" if keras_layer.dim_ordering == "th": self.dim_ordering = "NCHW" def dump_tf_layer(self, prev_tf_layer): w = tf.constant(self.weights) if self.dim_ordering == "NCHW": w = tf.transpose(w, [2, 3, 1, 0]) prev_tf_layer = tf.transpose(prev_tf_layer, [0, 2, 3, 1]) b = tf.constant(self.biases) tf_layer = tf.nn.conv2d(prev_tf_layer, w, strides=[1,1,1,1], padding=self.padding) + b if self.dim_ordering == "NCHW": tf_layer = tf.transpose(tf_layer, [0, 3, 1, 2]) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_CONV2D)) file.write(struct.pack('I', self.weights.shape[0])) file.write(struct.pack('I', self.weights.shape[1])) file.write(struct.pack('I', self.biases.shape[0])) save_floats(file, self.weights.flatten()) save_floats(file, self.biases.flatten()) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras_activation: activation = ACTIVATION_UNKNOWN def __init__(self, keras_layer): act = keras_layer.get_config()['activation'] if act == "linear": self.activation = ACTIVATION_LINEAR elif act == "relu": self.activation = ACTIVATION_RELU elif act == "softmax": self.activation = ACTIVATION_SOFTMAX elif act == "tanh": self.activation = ACTIVATION_TANH else: assert False, "Unsupported activation type: %s" % act def dump_tf_layer(self, prev_tf_layer): if self.activation == ACTIVATION_LINEAR: tf_layer = prev_tf_layer elif self.activation == ACTIVATION_RELU: tf_layer = tf.nn.relu(prev_tf_layer) elif self.activation == ACTIVATION_SOFTMAX: tf_layer = tf.nn.softmax(prev_tf_layer) elif self.activation == ACTIVATION_TANH: tf_layer = tf.tanh(prev_tf_layer) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_ACTIVATION)) file.write(struct.pack('I', self.activation)) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras_maxpool: pool_size = None padding = None dim_ordering = None def __init__(self, keras_layer): self.pool_size = keras_layer.get_config()['pool_size'] self.padding = 'VALID' if keras_layer.border_mode != "valid": assert False, "Unsupported padding type: %s" % keras_layer.border_mode self.dim_ordering = "NHWC" if keras_layer.dim_ordering == "th": self.dim_ordering = "NCHW" def 
dump_tf_layer(self, prev_tf_layer): if self.dim_ordering == "NCHW": prev_tf_layer = tf.transpose(prev_tf_layer, [0, 2, 3, 1]) tf_layer = tf.nn.max_pool(prev_tf_layer, ksize=[1, self.pool_size[0], self.pool_size[1], 1], strides=[1, self.pool_size[0], self.pool_size[1], 1], padding=self.padding) if self.dim_ordering == "NCHW": tf_layer = tf.transpose(tf_layer, [0, 3, 1, 2]) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_MAXPOOL)) file.write(struct.pack('I', self.pool_size[0])) file.write(struct.pack('I', self.pool_size[1])) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras_flatten: def __init__(self, keras_layer):None def dump_tf_layer(self, prev_tf_layer): tf_layer = tf.reshape(prev_tf_layer, [-1]) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_FLATTEN)) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras_dense: weights = None biases = None def __init__(self, keras_layer): self.weights = keras_layer.get_weights()[0] self.biases = keras_layer.get_weights()[1] def dump_tf_layer(self, prev_tf_layer): tf_layer = tf.reshape(prev_tf_layer, [-1, self.weights.shape[0]]) tf_layer = tf.matmul(tf_layer, self.weights) + self.biases tf_layer = tf.reshape(tf_layer, [-1]) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_DENSE)) file.write(struct.pack('I', self.weights.shape[0])) file.write(struct.pack('I', self.weights.shape[1])) file.write(struct.pack('I', self.biases.shape[0])) save_floats(file, self.weights.flatten()) save_floats(file, self.biases.flatten()) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras_dropout: p = 0 def __init__(self, keras_layer): self.p = keras_layer.p def dump_tf_layer(self, prev_tf_layer): # prob = tf.constant(self.p) prob = tf.constant(1.0) tf_layer = tf.nn.dropout(prev_tf_layer, prob) return tf_layer def save_to_file(self, file): file.write(struct.pack('I', LAYER_DROPOUT)) file.write(struct.pack('f', p)) def load_from_file(self, file): assert False, "UNSUPPORT" ##################################################################### class keras2tensorflow: layers = [] input_shape = [] def __init__(self, keras_model): self.layers = [] self.input_shape = keras_model.layers[0].batch_input_shape for keras_layer in keras_model.layers: layer_type = type(keras_layer).__name__ tf_layer = None if layer_type == "Convolution2D": tf_layer = keras_conv2d(keras_layer) elif layer_type == "Activation": tf_layer = keras_activation(keras_layer) elif layer_type == "MaxPooling2D": tf_layer = keras_maxpool(keras_layer) elif layer_type == "Flatten": tf_layer = keras_flatten(keras_layer) elif layer_type == "Dense": tf_layer = keras_dense(keras_layer) elif layer_type == "Dropout": # tf_layer = keras_dropout(keras_layer) continue else: assert False, "Unsupported layer type: %s" % layer_type self.layers.append(tf_layer) def save_protobuf(self, filename): graph_dump = tf.Graph() with graph_dump.as_default(): tf_input = tf.placeholder("float32", self.input_shape, name="input") tf_prediction = self.dump_tf_layer(tf_input) tf_output = tf.add(tf_prediction, 0, name="output") sess = tf.Session() graph_def = graph_dump.as_graph_def() tf.train.write_graph(graph_def, '', filename, as_text=False) sess.close() def layer_count(self): return 
len(self.layers) def dump_tf_layer_step(self, prev_tf_layer, index): if (index < 0) or (index >= len(self.layers)): index = len(self.layers)-1 now = 0 for tf_layer in self.layers: prev_tf_layer = tf_layer.dump_tf_layer(prev_tf_layer) now += 1 if now > index: break return prev_tf_layer def predict_step(self, data, index): sess = tf.Session() tf_input = tf.placeholder("float32", self.input_shape, name="input") tf_predict = self.dump_tf_layer_step(tf_input, index) result = sess.run(tf_predict, feed_dict={tf_input:data}) sess.close() return result def dump_tf_layer(self, prev_tf_layer): return self.dump_tf_layer_step(prev_tf_layer, -1) def predict(self, data): return self.predict_step(data, -1)
gpl-3.0
8,165,334,875,677,832,000
34.080882
87
0.538099
false
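A conversion sketch, not part of the original file: the class above dispatches on Keras 1.x layer names (Convolution2D, border_mode, dim_ordering), so the example assumes that API generation and a TensorFlow 1.x runtime; the import mirrors the keras2tensorflow.py file name.

# sketch only - small Keras 1.x style model converted and dumped to protobuf
import numpy as np
from keras.models import Sequential
from keras.layers import Convolution2D, Activation, MaxPooling2D, Flatten, Dense
from keras2tensorflow import keras2tensorflow

model = Sequential()
model.add(Convolution2D(8, 3, 3, input_shape=(1, 28, 28)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10))
model.add(Activation('softmax'))

converter = keras2tensorflow(model)
converter.save_protobuf('model.pb')                       # graph with "input"/"output" nodes
probs = converter.predict(np.zeros((1, 1, 28, 28), dtype=np.float32))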
manuamador/PIRE
EIRP_light.py
1
5124
# -*- coding: utf-8 -*- """ This program measures the equivalent isotropic radiated power of an equipment under test (maximal spectral density to be precise) If the polarization of the EUT is known without any doubt (e.g the EUT has an external linear antenna) one polarization (along the antenna) can be enough. 3 cutting planes, 1 polarization """ from __future__ import division import time import visa import scipy import os from numpy import * import matplotlib.pyplot as plt #Instruments modules import Spectrum import TurnTable nom=raw_input('Enter the name of the equipment?') if (os.path.isdir('Results_'+nom)==False): os.mkdir('Results_'+nom) #Calibration files Correction_H=loadtxt('SynthCal_Pol_H.txt') Correction_V=loadtxt('SynthCal_Pol_V.txt') os.chdir('Results_'+nom) f=Correction_H[:,0] ############################################### ########## Testing parameters ############## ############################################### fstart=f[0] #Start frequency fstop=f[-1] #Stop frequency fcenter=0.5*(fstart+fstop) #Center frequency fspan=fstop-fstart #Span RBW=1e6 #RBW size in Hz VBW=100e3 #VBW size in Hz SwpPt=len(f) #Number of points N=19 #Number of incident angles Angles=linspace(0,360,N) Pol=2 #Number of polarizations Exp=3 #Number of cutting planes Tmes=0.05 #dwell time ###Stop criterion ###channels center frequencies (european wifi) ##f0=2.412e9 ##fn=2.472e9 ##n=13 #number of channels ##fc=linspace(f0,fn,n) ###channel center frequencies indexes ##peaksindx=zeros(len(fc),dtype=int) ##for i in range(len(fc)): ## a=int(argmin(abs(f-fc[i]))) ## peaksindx[i]=a Level_criterion=-35 print '___________________________\n Instruments initializations\n' print '\nSpectrum analyzer:' Spectre=Spectrum.FSV30() Spectre.reset() Spectre.startFreq(fstart) Spectre.stopFreq(fstop) Spectre.RBW(RBW) Spectre.SweepPoint(SwpPt) Spectre.MaxHold() Spectre.UnitDBM() print '\nTurn table:' TTable=TurnTable.PlateauCA() TTable.reset() print '____________________\nMeasurement\n' Measurement=empty([Pol,Exp,N,2]) Raw_Traces=empty([Pol,Exp,N,2,SwpPt]) for k in range(Exp): #Boucle sur l'exposition de l'objet sous test print ("Cutting plane %s " %k) if k==0: l=0 if k==1: l=1 if k==2: l=1 if l==0: print 'Polarization: V' Polarization='V' else: print 'Polarization: H' Polarization='H' raw_input("\n Antenna polarization : %s, Cutting plane : %i \n Press Enter to continue...\n" %(Polarization,k)) for j in range(0,len(Angles)): #print ("Go to %s deg" %(Angles [j])) TTable.setPosition(Angles [j]) Spectre.readwrite() Spectre.MaxHold() time.sleep(Tmes) #raw_input("\n Press Enter to validate the measurement\n") Level = Spectre.getTrace(SwpPt) if Polarization=='V': cLevel=Level+Correction_V[:,1] else: cLevel=Level+Correction_H[:,1] #criterion automatic stop #while (min(cLevel[peaksindx])<Level_criterion): #every channel #while (min(cLevel[peaksindx])<Level_criterion): #one channel #while mean(Level[peaksindx]>Level_criterion)<p/n: #p channels among n # Level = Spectre.getTrace(SwpPt) # if Polarization=='V': # cLevel=Level+Correction_V[:,1] # else: # cLevel=Level+Correction_H[:,1] # time.sleep(0.5) Trace=Level MaxLevel=max(cLevel) MaxIdx =cLevel.argmax() Measurement[l,k,j,:]=array([f[MaxIdx],MaxLevel]) Raw_Traces[l,k,j,:]=Trace print ' %s deg Max EIRP = %2.2f mW/MHz' %((Angles [j]),10**(MaxLevel/10)) print ("\n\nBack to 0 deg.") TTable.setPosition(0) r=(10**((Measurement[l,k,:,1])/10)) plt.clf() plt.polar((Angles*pi/180),r) Graphlin= 'Graph_Pol_%s_Exp%s' %(Polarization,k) plt.ylabel('Puissance max mW') plt.title("Diagramme de 
rayonnement en mW") plt.savefig(Graphlin+'.pdf',bbox='tight') plt.savefig(Graphlin+'.png',bbox='tight') plt.clf() plt.plot(Angles,Measurement[l,k,:,1]) plt.ylabel('Puissance max en dBm') plt.xlabel("Angles en degres") plt.title("Diagramme de rayonnement en dBm") plt.xlim(0,360) plt.grid(True) GraphdBm= 'Graph_lin_%s_Exp%s' %(Polarization,k) plt.savefig(GraphdBm+'.pdf',bbox='tight') plt.savefig(GraphdBm+'.png',bbox='tight') plt.clf() fname = ( '%s_Exp%s.txt') %(Polarization,k) savetxt(fname,Measurement[l,k,:]) savez('Bin_Results.npz',Measurement=Measurement,Raw_Traces=Raw_Traces,f=f)
agpl-3.0
-5,640,217,196,964,577,000
29.435583
154
0.564988
false
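A post-processing sketch, not part of the original script: it reads back the Bin_Results.npz archive written by savez() above; the Results_myEUT folder name stands in for whatever equipment name was typed at the prompt.

# sketch only - array names follow the savez() call in the script
import numpy as np

results = np.load('Results_myEUT/Bin_Results.npz')   # hypothetical folder name
measurement = results['Measurement']                 # shape (Pol, Exp, N angles, 2): [freq of max, level in dBm]
f = results['f']
level_dbm = measurement[..., 1]
print("Max EIRP density: %.2f mW/MHz" % (10 ** (level_dbm.max() / 10)))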
huazhisong/graduate_text
prepocessing_bugs.py
1
14347
# -*- code:utf-8 -*- # 导包 import os import re import nltk import pandas as pd import numpy as np from dateutil.parser import parse from datetime import timedelta from nltk.corpus import stopwords def select_lines_comments(lines_raw): # including summary, description, comments # no names selected_lines = [] for line in lines_raw: line = line.strip() if line == '': continue if r'[reply] Comment' in line or r'[reply] Description' in line: continue selected_lines.append(line) return selected_lines def select_lines(lines_raw): # select summary and description, no comments # no names selected_lines = [] for line in lines_raw: line = line.strip() if line == '': continue if r'[reply] Description' in line: continue if r'[reply] Comment' in line: break selected_lines.append(line) return selected_lines def select_lines_include_reply(lines_raw): # select summary, description, no comments # including name selected_lines = [] for line in lines_raw: line = line.strip() if line == '': continue if r'[reply] Comment' in line: break selected_lines.append(line) return selected_lines def clean_raw(raw_text): # select summary, description, comments # including name selected_lines = [] for line in raw_text: line = line.strip() if line == '': continue selected_lines.append(line) return selected_lines # utlize cnn clean_str method to clean def clean_raw_cnn(raw_text): string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", raw_text) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? 
", string) clean_text = re.sub(r"\s{2,}", " ", string) return clean_text def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return nltk.corpus.wordnet.ADJ elif treebank_tag.startswith('V'): return nltk.corpus.wordnet.VERB elif treebank_tag.startswith('N'): return nltk.corpus.wordnet.NOUN elif treebank_tag.startswith('R'): return nltk.corpus.wordnet.ADV else: return 'n' def clean_words(words_raw, wnl, english_stopwords): pos_words = nltk.pos_tag(words_raw) clean_words = [wnl.lemmatize(word_pos[0], get_wordnet_pos( word_pos[1])) for word_pos in pos_words] clean_words = ' '.join(clean_words).lower().split() clean_words = [ word for word in clean_words if word not in english_stopwords] clean_words = [word for word in clean_words if word.isalpha()] return clean_words # read lines def read_lines(file_path): # open description file with open(file_path, encoding='latin2') as f: # remove last 5 lines lines_raw = f.readlines() # read lines specially selected_lines = clean_raw(lines_raw) # raw text raw_text = ' '.join(selected_lines) # decode utf8 coding raw_text = raw_text.encode('utf8').decode('utf8') # sentences tokinzer sentences = nltk.sent_tokenize(raw_text) tokens = [] # dealing words wnl = nltk.WordNetLemmatizer() english_stopwords = stopwords.words('english') for sentence in sentences: # cean raw sentence sentence = clean_raw_cnn(sentence) # words tokenizer raw_words = nltk.word_tokenize(sentence) # clearn word tmp = clean_words(raw_words, wnl, english_stopwords) tokens.extend(tmp) assert len(tokens) > 0 line = ' '.join(tokens) return line def parse_when(when): tz_lookup_tabel = {'EDT': timedelta(hours=12), 'EST': timedelta( hours=13), 'PDT': timedelta(hours=15), 'PST': timedelta(hours=16)} tz = when.split()[2] t = parse(when) + tz_lookup_tabel[tz] return t.strftime('%Y-%m-%d %H:%M:%S') def merged_files(data_files, results_files): # 读取文件,处理并写入bugs_all.csv文件 for bug_file in os.listdir(data_files + '/buglist'): bug_dir = bug_file.split('.')[0] file_path = data_files + '/buglist/' + bug_file with open(file_path, mode='r', encoding='utf8') as f: bugs = f.readlines()[1:] for bug in bugs: tmp = bug.split(',') bug_id, bug_who = tmp[0], tmp[4].split( '"')[1] if '"' in tmp[4] else tmp[4] bug_assingee = bug_who.split( '@')[0] if '@' in bug_who else bug_who print(data_files + '/description/' + bug_dir + '/' + bug_id + '.txt') # 读取描述文件 file_path = data_files +\ '/description/' + bug_dir + '/' + bug_id + '.txt' line = read_lines(file_path) # for raw data # line = "\""+line + "\"" # 读取修改时间 print(data_files + '/bughistory_raw/' + bug_dir + '/' + bug_id + '.csv') file_path = data_files +\ '/bughistory_raw/' + bug_dir + '/' + bug_id + '.csv' with open(file_path, encoding='latin2') as f: when = f.readlines()[-1].split(',')[0] when = parse_when(when) bug = ','.join([when, bug_id, bug_who, line, bug_assingee]) # 写入文件 with open(results_files, mode='a', encoding='utf8') as f: f.write(bug) f.write('\n') def sortedbytimesplited(results_files): train_all = pd.read_csv(results_files, parse_dates=[3]) train_all_sorted = train_all.sort_values('when') # 将数据分成11份 bug_len = len(train_all_sorted) bug_part_size = int((bug_len - 1) / 11) + 1 for i in range(11): begin_index = i * bug_part_size end_index = min((i + 1) * bug_part_size, bug_len - 1) bug_parted = train_all_sorted.iloc[begin_index:end_index] bug_parted.to_csv(data_files + sub_dir + str(i) + '.csv', header=True, index=False) def get_term_dict(doc_terms_list): term_set_dict = {} for doc_terms in doc_terms_list: for term in doc_terms: 
term_set_dict[term] = 1 term_set_list = sorted(term_set_dict.keys()) # term set 排序后,按照索引做出字典 term_set_dict = dict(zip(term_set_list, range(len(term_set_list)))) return term_set_dict def get_class_dict(doc_class_list): class_set = sorted(list(set(doc_class_list))) class_dict = dict(zip(class_set, range(len(class_set)))) return class_dict def stats_class_df(doc_class_list, class_dict): class_df_list = [0] * len(class_dict) for doc_class in doc_class_list: class_df_list[class_dict[doc_class]] += 1 return class_df_list def stats_term_class_df(doc_terms_list, doc_class_list, term_dict, class_dict): term_class_df_mat = np.zeros((len(term_dict), len(class_dict)), np.float32) for k in range(len(doc_class_list)): class_index = class_dict[doc_class_list[k]] doc_terms = doc_terms_list[k] for term in set(doc_terms): term_index = term_dict[term] term_class_df_mat[term_index][class_index] += 1 return term_class_df_mat def feature_selection_mi(class_df_list, term_set, term_class_df_mat): A = term_class_df_mat B = np.array([(sum(x) - x).tolist() for x in A]) C = np.tile(class_df_list, (A.shape[0], 1)) - A N = sum(class_df_list) class_set_size = len(class_df_list) term_score_mat = np.log( ((A + 1.0) * N) / ((A + C) * (A + B + class_set_size))) term_score_max_list = [max(x) for x in term_score_mat] term_score_array = np.array(term_score_max_list) sorted_term_score_index = term_score_array.argsort()[:: -1] term_set_fs = [term_set[index] for index in sorted_term_score_index] print(term_set_fs[:10]) return term_set_fs def feature_selection_ig(class_df_list, term_set, term_class_df_mat): A = term_class_df_mat B = np.array([(sum(x) - x).tolist() for x in A]) C = np.tile(class_df_list, (A.shape[0], 1)) - A N = sum(class_df_list) D = N - A - B - C term_df_array = np.sum(A, axis=1) class_set_size = len(class_df_list) p_t = term_df_array / N p_not_t = 1 - p_t p_c_t_mat = (A + 1) / (A + B + class_set_size) p_c_not_t_mat = (C + 1) / (C + D + class_set_size) p_c_t = np.sum(p_c_t_mat * np.log(p_c_t_mat), axis=1) p_c_not_t = np.sum(p_c_not_t_mat * np.log(p_c_not_t_mat), axis=1) term_score_array = p_t * p_c_t + p_not_t * p_c_not_t sorted_term_score_index = term_score_array.argsort()[:: -1] term_set_fs = [term_set[index] for index in sorted_term_score_index] print(term_set_fs[:10]) return term_set_fs def feature_selection_wllr(class_df_list, term_set, term_class_df_mat): A = term_class_df_mat B = np.array([(sum(x) - x).tolist() for x in A]) C_Total = np.tile(class_df_list, (A.shape[0], 1)) N = sum(class_df_list) C_Total_Not = N - C_Total term_set_size = len(term_set) p_t_c = (A + 1E-6) / (C_Total + 1E-6 * term_set_size) p_t_not_c = (B + 1E-6) / (C_Total_Not + 1E-6 * term_set_size) term_score_mat = p_t_c * np.log(p_t_c / p_t_not_c) term_score_max_list = [max(x) for x in term_score_mat] term_score_array = np.array(term_score_max_list) sorted_term_score_index = term_score_array.argsort()[:: -1] term_set_fs = [term_set[index] for index in sorted_term_score_index] print(term_set_fs[:10]) return term_set_fs def feature_selection(doc_terms_list, doc_class_list, fs_method, percent): class_dict = get_class_dict(doc_class_list) term_dict = get_term_dict(doc_terms_list) class_df_list = stats_class_df(doc_class_list, class_dict) term_class_df_mat = stats_term_class_df( doc_terms_list, doc_class_list, term_dict, class_dict) term_set = [term[0] for term in sorted(term_dict.items(), key=lambda x: x[1])] term_set_size = len(term_set) selection_size = int(term_set_size * percent) term_set_fs = [] if fs_method == 'MI': term_set_fs = 
feature_selection_mi( class_df_list, term_set, term_class_df_mat) elif fs_method == 'IG': term_set_fs = feature_selection_ig( class_df_list, term_set, term_class_df_mat) elif fs_method == 'WLLR': term_set_fs = feature_selection_wllr( class_df_list, term_set, term_class_df_mat) return term_set_fs[:selection_size] def filter_by_feature_selection(data_dir, fs_method='IG', percent=0.7, encoding='utf8', validation=False): start_index = 2 if validation else 1 for step in range(start_index, 11): data_files = [data_dir + str(i) + '.csv' for i in range(step + 1)] if validation: train_data = pd.concat( [pd.read_csv(file, encoding=encoding) for file in data_files[:-2]]) else: train_data = pd.concat( [pd.read_csv(file, encoding=encoding) for file in data_files[:-1]]) test_data = pd.read_csv(data_files[-1], encoding=encoding) text_train = train_data.text fixer_train = train_data.fixer text_test = test_data.text fixer_test = test_data.fixer # 特征选择 待完善 featurs_names = feature_selection( [doc.split() for doc in text_train], fixer_train.values, fs_method, percent) def filter_by_features_names(doc): doc = doc.split() tmp = [word for word in doc if word in featurs_names] if len(tmp) < 3: return doc else: return ' '.join(tmp) print('Training data selection') text_train.apply(filter_by_features_names) print('Finishing>>>>>>') print('Testing data selection') text_test.apply(filter_by_features_names) print('Finishing>>>>>>') data_train = pd.concat([text_train, fixer_train], axis=1) results_dir = data_dir + fs_method + str('/') if not os.path.exists(data_dir): os.mkdir(data_dir) file_train = results_dir + str(percent) + str(step) + '.train.csv' data_train.to_csv(file_train, index=False) data_test = pd.concat([text_test, fixer_test], axis=1) file_test = results_dir + str(percent) + str(step) + '.test.csv' data_test.to_csv(file_test, index=False) if validation: dev_data = pd.read_csv(data_files[-2], encoding=encoding) text_dev = dev_data.text fixer_dev = dev_data.fixer print('Developing data selection') text_dev.apply(filter_by_features_names) data_dev = pd.concat([text_dev, fixer_dev], axis=1) file_dev = results_dir + str(percent) + str(step) + '.dev.csv' data_dev.to_csv(file_dev, index=False) print('Finishing %s' % step) if __name__ == '__main__': """ import nltk nltk.download('punkt') nltk.download('stopwords') nltk.download('averaged_perceptron_tagger') nltk.download('wordnet') """ # 文件存储位置 data_dir = '../../../data/' data_set = '/eclipse' data_files = data_dir + data_set sub_dir = '/song_no_select/' results_files = data_files + sub_dir + 'bugs_all.csv' # 写入列名 with open(results_files, 'w') as f: f.write('when,id,who,text,fixer\n') # 合并文件 merged_files(data_files, results_files) # 将文件分成十一份 sortedbytimesplited(results_files)
agpl-3.0
-7,242,860,560,946,841,000
33.449029
79
0.564997
false
CheML/CheML
cheml/datasets.py
1
12059
"""Dataset loader helpers. This file collects everything related to downloading and organizing CheML and other open datasets for computational chemistry.""" import os import numpy as np import sys try: from urllib.request import urlopen from urllib.error import HTTPError, URLError except: from urllib2 import urlopen, HTTPError, URLError import pickle from sklearn.datasets.base import Bunch from scipy.io import loadmat from sklearn.decomposition import PCA import tarfile def get_data_dirs(data_dir=None): """Returns a priority list of folders where to search for a dataset. If `data_dir` is specified, this will have highest priority. The list is as follows: 1. data_dir if specified 2. the environment variable CHEML_SHARED_DATA if specified 3. the environment variable CHEML_DATA if specified 4. $HOME/CheML_data """ paths = [] cheml_shared_data = os.environ.get("CHEML_SHARED_DATA", None) cheml_data = os.environ.get("CHEML_DATA", None) home_data_folder = os.path.expanduser("~/cheml_data") if data_dir is not None: paths.append(data_dir) if cheml_shared_data is not None: paths.append(cheml_shared_data) if cheml_data is not None: paths.append(cheml_data) paths.append(home_data_folder) return paths HF_URL_BASE = ("https://raw.githubusercontent.com/SamKChang/" "QM_wavelet/master/data/") dataset_info = dict( HF2_1K=("HF/HF2_1K.pkl", HF_URL_BASE + "data_m2.pkl"), HF3_1K=("HF/HF3_1K.pkl", HF_URL_BASE + "data_m3.pkl"), HF4_1K=("HF/HF4_1K.pkl", HF_URL_BASE + "data_m4.pkl"), HF5_1K=("HF/HF5_1K.pkl", HF_URL_BASE + "data_m5.pkl"), HF6_1K=("HF/HF6_1K.pkl", HF_URL_BASE + "data_m6.pkl"), HF2_7K=("HF/HF2_7K.pkl", HF_URL_BASE + "data_m2_7k.pkl"), HF3_10K=("HF/HF3_10K.pkl", HF_URL_BASE + "data_m3_10k.pkl"), HF4_10K=("HF/HF4_10K.pkl", HF_URL_BASE + "data_m4_10k.pkl"), HF5_10K=("HF/HF5_10K.pkl", HF_URL_BASE + "data_m5_10k.pkl"), HF6_10K=("HF/HF6_10K.pkl", HF_URL_BASE + "data_m6_10k.pkl"), HX2=("HF/HX2.pkl", HF_URL_BASE + "data_HX2.pkl"), HX3=("HF/HX3.pkl", HF_URL_BASE + "data_HX3.pkl"), HX4=("HF/HX4.pkl", HF_URL_BASE + "data_HX4.pkl"), HX5=("HF/HX5.pkl", HF_URL_BASE + "data_HX5.pkl"), HX6=("HF/HX6.pkl", HF_URL_BASE + "data_HX6.pkl"), QM7=("GDB13/qm7.mat", "http://quantum-machine.org/data/qm7.mat"), QM9=("GDB13/qm9.pkl", "https://ndownloader.figshare.com/files/7003292"), QM9_bonds=("GDB13/qm9_bonds.npz", "https://berkeley.box.com/shared/static/" "2mq7cgd8aypqy7js1mr8lztkn8ky34qj.npz"), QM9_properties=("GDB13/qm9_properties.npy", "https://berkeley.box.com/shared/static/" "ptjxewyrg1ahcs2lkr6fq1jyuyfnoemo.npy"), QM9_folds=("GDB13/qm9_folds.npy", "https://berkeley.box.com/shared/static/" "9b5ltx6ia45d2rfx5zsqxcfvpi6gp58w.npy") ) #def https_open_with_auth(url, user, passwd): # request = urllib2.Request(url) # user_pass = base64.b64encode('{}:{}'.format(user, passwd)) # request.add_header("Authorization", "Basic {}".format(user_pass)) # return urllib2.urlopen(request) def _find_file(paths, filename): abs_paths = [os.path.join(path, filename) for path in paths] for filepath in abs_paths: if os.path.exists(filepath): return filepath return None def _get_first_writeable_path(paths, filename): abs_paths = [os.path.join(path, filename) for path in paths] dirs = [os.path.dirname(filepath) for filepath in abs_paths] # basenames = [os.path.basename(filepath) for filepath in abs_paths] errors = [] for dirname, filename in zip(dirs, abs_paths): try: if not os.path.exists(dirname): os.makedirs(dirname) existed = os.path.exists(filename) with open(filename, 'a'): pass if not existed: os.remove(filename) return filename except 
Exception as e: errors.append(e) raise OSError( "CheML could not store in any of the following directories:\n\n" + "\n".join(dirs)) def _download(url, filename): try: f = urlopen(url) with open(filename, 'wb') as local_file: local_file.write(f.read()) except URLError as e: raise except HTTPError as e: raise def _tar_decompress(local_name, filename): tar = tarfile.open(local_name) # Assume single file member in tar file members = tar.getnames() if len(members) == 1: content = tar.extractfile(members[0]) with open(filename, 'wb') as local_file: local_file.write(content.read()) else: tar.extractall(filename) os.remove(local_name) def _get_or_download_dataset(dataset_name, path=None, suffix=None): rel_path, url = dataset_info[dataset_name] if path is None: paths = get_data_dirs() else: paths = [path] filename = _find_file(paths, rel_path) if filename is not None: return filename else: filename = _get_first_writeable_path(paths, rel_path) if suffix is not None: local_name = filename + suffix else: local_name = filename print("Downloading {} to {}...".format(url, local_name)) _download(url, local_name) print("... done.") if suffix is not None: if 'tar.gz' in suffix: print("Decompress and tar file {}...".format(local_name)) _tar_decompress(local_name, filename) print("... done.") return filename def _open_pickle(filename): # hack from http://stackoverflow.com/questions/11305790/pickle-incompatability-of-numpy-arrays-between-python-2-and-3 # Needs to be extensively tested between versions with open(filename, 'rb') as f: try: u = pickle._Unpickler(f) u.encoding = 'latin1' p = u.load() except AttributeError: p = pickle.load(f) return Bunch(**p) def load_HF2(path=None, large=False): dataset_name = 'HF2_7K' if large else 'HF2_1K' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HF3(path=None, large=False): dataset_name = 'HF3_10K' if large else 'HF3_1K' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HF4(path=None, large=False): dataset_name = 'HF4_10K' if large else 'HF4_1K' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HF5(path=None, large=False): dataset_name = 'HF5_10K' if large else 'HF5_1K' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HF6(path=None, large=False): dataset_name = 'HF6_10K' if large else 'HF6_1K' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HX2(path=None): filename = _get_or_download_dataset('HX2', path=path) return _open_pickle(filename) def load_HX3(path=None): dataset_name = 'HX3' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HX4(path=None): dataset_name = 'HX4' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HX5(path=None): dataset_name = 'HX5' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def load_HX6(path=None): dataset_name = 'HX6' filename = _get_or_download_dataset(dataset_name, path=path) return _open_pickle(filename) def _gdb_align(bunch, align, only_planar, planarity_tol): sys.stdout.flush() pca = PCA() keep_molecule = [] has_bonds = "O" in bunch for c,(positions, charges) in enumerate(zip(bunch.R, bunch.Z)): transformed = np.vstack([ pca.fit_transform(positions[charges != 0]), np.zeros([(charges == 0).sum(), 3])]) if has_bonds: 
transformed_bonds=np.vstack([pca.transform(bunch.B[c,bunch.O[c]!=0].reshape((-1,3))).reshape((-1,2,3)), np.zeros([(bunch.O[c] == 0).sum(),2 , 3])]) # the following evaluates how much variance is in the first two axes # before this, the algorithm was also using zero positions, leading # to 454 planar molecules (for QM7): # pca.fit(positions).explained_variance_ratio_[:2].sum() # # currently, the algorithm yields 415 planar molecules var_2D = pca.explained_variance_ratio_[:2].sum() keep = (not only_planar) or var_2D > 1 - planarity_tol keep_molecule.append(keep) if align and keep: positions[:] = transformed if has_bonds: bunch.B[c,:] = transformed_bonds return keep_molecule def load_qm7(path=None, align=False, only_planar=False, planarity_tol=.01): # import ipdb;ipdb.set_trace() filename = _get_or_download_dataset("QM7", path=path) qm7_file = loadmat(filename) qm7_bunch = Bunch(**{k:v for k, v in qm7_file.items() if k in ['P', 'X', 'T', 'Z', 'R']}) if align or only_planar: keep_molecule = _gdb_align(qm7_bunch, align, only_planar, planarity_tol) if only_planar: keep_molecule = np.array(keep_molecule) qm7_bunch['X'] = qm7_bunch.X[keep_molecule] qm7_bunch['T'] = qm7_bunch.T[:, keep_molecule].ravel() qm7_bunch['Z'] = qm7_bunch.Z[keep_molecule] qm7_bunch['R'] = qm7_bunch.R[keep_molecule] new_molecule_indices = -np.ones_like(keep_molecule, dtype='int') new_molecule_indices[keep_molecule] = np.arange(keep_molecule.sum()) P = [new_molecule_indices[p[keep_molecule[p]]] for p in qm7_bunch['P']] qm7_bunch['P'] = P CV = [] for i in range(len(P)): train = np.concatenate([p for j, p in enumerate(P) if j != i]) test = P[i] CV.append((train, test)) qm7_bunch['CV'] = CV return qm7_bunch def load_qm9(path=None, align=False, only_planar=False, planarity_tol=.01): # import ipdb;ipdb.set_trace() filename = _get_or_download_dataset("QM9", path=path, suffix='.tar.gz') filename_bonds = _get_or_download_dataset("QM9_bonds", path=path, suffix=None) filename_properties = _get_or_download_dataset("QM9_properties", path=path, suffix=None) filename_folds = _get_or_download_dataset("QM9_folds", path=path, suffix=None) qm9_file = _open_pickle(filename) qm9_bonds = np.load(filename_bonds) qm9_file['R'] = qm9_file['xyz'] qm9_file['B'] = qm9_bonds['bond_atoms'] qm9_file['O'] = qm9_bonds['bond_orders'] qm9_file['T'] = qm9_file['E'] qm9_bunch = Bunch(**{k:v for k, v in qm9_file.items() if k in ['R', 'Z', 'T','B','O']}) qm9_properties = np.load(filename_properties) property_names = qm9_properties.dtype.names qm9_bunch.update(**{p:qm9_properties[p] for p in property_names if p not in ('A', 'B', 'C')}) qm9_bunch.update({'{}rot'.format(a): qm9_properties[a] for a in ('A', 'B', 'C')}) qm9_folds = np.load(filename_folds) qm9_bunch.update({'P_stratified_Ua': qm9_folds}) if align or only_planar: keep_molecule = _gdb_align(qm9_bunch, align, only_planar, planarity_tol) if only_planar: keep_molecule = np.array(keep_molecule) qm9_bunch['T'] = qm9_bunch.T[keep_molecule] qm9_bunch['Z'] = qm9_bunch.Z[keep_molecule] qm9_bunch['R'] = qm9_bunch.R[keep_molecule] qm9_bunch['B'] = qm9_bunch.B[keep_molecule] qm9_bunch['O'] = qm9_bunch.O[keep_molecule] return qm9_bunch
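A short usage sketch for the loaders defined above. The cache directory is an assumption chosen for illustration; on first use the loader downloads the raw file into whichever writable location `get_data_dirs` resolves.

import os

# Hypothetical cache location; any writable directory works.
os.environ.setdefault("CHEML_DATA", os.path.expanduser("~/cheml_cache"))
print(get_data_dirs())  # the directory above now appears in the search list

# Load QM7, keeping only (approximately) planar molecules and aligning them onto
# their principal axes; X holds the Coulomb matrices, T the target energies.
qm7 = load_qm7(align=True, only_planar=True)
print(qm7.X.shape, qm7.T.shape)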
bsd-3-clause
8,838,288,708,644,250,000
34.260234
121
0.610333
false
socialsensor/community-evolution-analysis
python/main_NONadaptive.py
1
5906
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name:
# Purpose: This .py file is the main Framework file
# It uses a straightforward timeslot partitioning algorithm
#
# Required libs: python-dateutil, numpy,matplotlib,pyparsing
# Author: konkonst
#
# Created: 20/08/2013
# Copyright: (c) ITI (CERTH) 2013
# Licence: <apache licence 2.0>
#-------------------------------------------------------------------------------
import time, os, pickle
import dateutil.parser  # required for parsing the timeMin/timeMax strings below
from CommunityRanking_NONadaptive import communityranking

'''PARAMETERS'''
# User sets json dataset folder
dataset_path = "./yesallwomen"
# Construct the data class from scratch: 1-yes / 2-only the evolution / 3-perform only the ranking
dataextractStage = 3
# User sets desired number of displayed top communities
numTopComms = 10
# User sets how many timeslots back the framework should search
prevTimeslots = 3
# Number of labels on the x-axis of the activity distribution
xLablNum = 20
# User decides whether to simplify the jsons into readable txts: 1-on / 0-off (time consuming)
simplify_json = 0
# If json files have irregular timestamps, set rankIrregularTime to 1 in order to order them chronologically
rankIrregularTime = 0
print(dataset_path)

# User sets desired time intervals if applicable. Else the whole dataset is considered
if dataset_path == "./testDataset":
    timeSeg = [86400]
    timeInterval = 86400

if dataset_path == "./snowDataset":
    timeSeg = [3600, 3600*2, 3600*3, 3600*6]
    timeInterval = 3600

if dataset_path == "./us_elections":
    timeSeg = [2700, 3600, 3600*3, 3600*6]
    timeInterval = 3600*3

if dataset_path == "./greek_elections":
    timeSeg = [3600*3, 3600*6, 3600*12, 86400]
    timeMin = '04:00-19/01/15'
    timeMax = '14:45-29/01/15'
    timeInterval = 3600*12

if dataset_path == "./greekDefault":
    timeSeg = [86400]
    timeMin = '25/06/2015 00:00'
    timeInterval = 86400

if dataset_path == "./sherlock":
    timeSeg = [3600*6, 3600*12, 86400, 86400*5]
    timeMin = '31/12/2013 00:00'
    timeMax = '14/12/2014 00:00'
    timeInterval = 3600*12

if dataset_path == "./yesallwomen":
    timeSeg = [3600*6, 3600*12, 86400]
    timeInterval = 86400

'''Functions'''
t = time.time()

if dataextractStage > 1:
    print('Please check the parameters and specifically that the timeInterval is the one chosen in stage1.')
    if timeInterval < 3600:
        timeNum = timeInterval / 60
        timeTitle = 'per' + str(int(timeNum)) + 'mins'
    elif timeInterval >= 3600 and timeInterval < 86400:
        timeNum = timeInterval / 3600
        timeTitle = 'per' + str(int(timeNum)) + 'hours'
    elif timeInterval >= 86400 and timeInterval < 604800:
        timeNum = timeInterval / 86400
        timeTitle = 'per' + str(int(timeNum)) + 'days'
    elif timeInterval >= 604800 and timeInterval < 2592000:
        timeNum = timeInterval / 604800
        timeTitle = 'per' + str(int(timeNum)) + 'weeks'
    else:
        timeNum = timeInterval / 2592000
        timeTitle = 'per' + str(int(timeNum)) + 'months'

# fix earliest and latest time limits if available
try:
    timeMin = dateutil.parser.parse(timeMin, dayfirst=True)
    timeMin = int(time.mktime(timeMin.timetuple()))
except:
    timeMin = 0
try:
    timeMax = dateutil.parser.parse(timeMax, dayfirst=True)
    timeMax = int(time.mktime(timeMax.timetuple()))
except:
    timeMax = time.time()

if dataextractStage == 1:  # If the basic data (authors, mentions, time) has NOT been created
    if not os.path.exists(dataset_path + "/data/nonadaptive/results/"):
        os.makedirs(dataset_path + "/data/nonadaptive/results/")
    if not os.path.exists(dataset_path + "/data/nonadaptive/tmp/"):
        os.makedirs(dataset_path + "/data/nonadaptive/tmp/")
    try:
        timeMin
        data = communityranking.from_json(dataset_path, timeSeg, simplify_json, rankIrregularTime, timeMin=timeMin, timeMax=timeMax)
    except NameError:
        data = communityranking.from_json(dataset_path, timeSeg, simplify_json, rankIrregularTime)
    dataPck = open(dataset_path + "/data/nonadaptive/tmp/data.pck", "wb")
    pickle.dump(data, dataPck, protocol=2)
    dataPck.close()
    elapsed = time.time() - t
    print('Stage 1: %.2f seconds' % elapsed)
    dataEvol = data.evol_detect(prevTimeslots, xLablNum)
    del(data)
    dataEvolPck = open(dataset_path + "/data/nonadaptive/tmp/dataEvol_prev" + str(prevTimeslots) + dataEvol.fileTitle + ".pck", "wb")
    pickle.dump(dataEvol, dataEvolPck, protocol=2)
    dataEvolPck.close()
    elapsed = time.time() - t - elapsed
    print('Stage 2: %.2f seconds' % elapsed)
elif dataextractStage == 2:  # If the basic data (authors, mentions, time) has been created
    if not os.path.exists(dataset_path + '/data/nonadaptive/tmp/dataComm_' + timeTitle + '.pck'):
        data = pickle.load(open(dataset_path + "/data/nonadaptive/tmp/data.pck", 'rb'))
    else:
        data = pickle.load(open(dataset_path + '/data/nonadaptive/tmp/dataComm_' + timeTitle + '.pck', "rb"))
    data.dataset_path = dataset_path
    dataEvol = data.evol_detect(prevTimeslots, xLablNum)
    del(data)
    dataEvolPck = open(dataset_path + "/data/nonadaptive/tmp/dataEvol_prev" + str(prevTimeslots) + timeTitle + ".pck", "wb")
    pickle.dump(dataEvol, dataEvolPck, protocol=2)
    dataEvolPck.close()
    elapsed = time.time() - t
    print('Stage 2: %.2f seconds' % elapsed)
else:  # Only ranking and heat map creation beyond this point
    try:
        dataEvol = pickle.load(open(dataset_path + "/data/nonadaptive/tmp/dataEvol_prev" + str(prevTimeslots) + timeTitle + ".pck", 'rb'))
    except:
        print('Please check the parameters and specifically that the timeInterval is the one chosen in stage1.')
    dataEvol.dataset_path = dataset_path
    print("Ranking Commences")
    rankedCommunities = dataEvol.commRanking(numTopComms, prevTimeslots, xLablNum)
    elapsed = time.time() - t
    print('Elapsed: %.2f seconds' % elapsed)
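The interval-to-label mapping in the stage-2/3 branch above is easy to get subtly wrong, so the sketch below restates it as a standalone helper with a couple of sanity checks. It is illustrative only and not part of the original pipeline.

def interval_title(seconds):
    # Mirrors the if/elif chain above: minutes, hours, days, weeks, then months.
    buckets = [(3600, 60, 'mins'), (86400, 3600, 'hours'),
               (604800, 86400, 'days'), (2592000, 604800, 'weeks')]
    for upper, unit, name in buckets:
        if seconds < upper:
            return 'per' + str(int(seconds / unit)) + name
    return 'per' + str(int(seconds / 2592000)) + 'months'

assert interval_title(3600 * 12) == 'per12hours'
assert interval_title(86400) == 'per1days'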
apache-2.0
5,782,171,690,535,982,000
40.591549
132
0.677955
false
dwillmer/numpy
numpy/core/memmap.py
6
11309
from __future__ import division, absolute_import, print_function import numpy as np from .numeric import uint8, ndarray, dtype from numpy.compat import long, basestring, is_pathlib_path __all__ = ['memmap'] dtypedescr = dtype valid_filemodes = ["r", "c", "r+", "w+"] writeable_filemodes = ["r+", "w+"] mode_equivalents = { "readonly":"r", "copyonwrite":"c", "readwrite":"r+", "write":"w+" } class memmap(ndarray): """Create a memory-map to an array stored in a *binary* file on disk. Memory-mapped files are used for accessing small segments of large files on disk, without reading the entire file into memory. NumPy's memmap's are array-like objects. This differs from Python's ``mmap`` module, which uses file-like objects. This subclass of ndarray has some unpleasant interactions with some operations, because it doesn't quite fit properly as a subclass. An alternative to using this subclass is to create the ``mmap`` object yourself, then create an ndarray with ndarray.__new__ directly, passing the object created in its 'buffer=' parameter. This class may at some point be turned into a factory function which returns a view into an mmap buffer. Delete the memmap instance to close. Parameters ---------- filename : str, file-like object, or pathlib.Path instance The file name or file object to be used as the array data buffer. dtype : data-type, optional The data-type used to interpret the file contents. Default is `uint8`. mode : {'r+', 'r', 'w+', 'c'}, optional The file is opened in this mode: +------+-------------------------------------------------------------+ | 'r' | Open existing file for reading only. | +------+-------------------------------------------------------------+ | 'r+' | Open existing file for reading and writing. | +------+-------------------------------------------------------------+ | 'w+' | Create or overwrite existing file for reading and writing. | +------+-------------------------------------------------------------+ | 'c' | Copy-on-write: assignments affect data in memory, but | | | changes are not saved to disk. The file on disk is | | | read-only. | +------+-------------------------------------------------------------+ Default is 'r+'. offset : int, optional In the file, array data starts at this offset. Since `offset` is measured in bytes, it should normally be a multiple of the byte-size of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of file are valid; The file will be extended to accommodate the additional data. By default, ``memmap`` will start at the beginning of the file, even if ``filename`` is a file pointer ``fp`` and ``fp.tell() != 0``. shape : tuple, optional The desired shape of the array. If ``mode == 'r'`` and the number of remaining bytes after `offset` is not a multiple of the byte-size of `dtype`, you must specify `shape`. By default, the returned array will be 1-D with the number of elements determined by file size and data-type. order : {'C', 'F'}, optional Specify the order of the ndarray memory layout: :term:`row-major`, C-style or :term:`column-major`, Fortran-style. This only has an effect if the shape is greater than 1-D. The default order is 'C'. Attributes ---------- filename : str or pathlib.Path instance Path to the mapped file. offset : int Offset position in the file. mode : str File mode. Methods ------- flush Flush any changes in memory to file on disk. When you delete a memmap object, flush is called first to write changes to disk before removing the object. 
Notes ----- The memmap object can be used anywhere an ndarray is accepted. Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns ``True``. Memory-mapped files cannot be larger than 2GB on 32-bit systems. When a memmap causes a file to be created or extended beyond its current size in the filesystem, the contents of the new part are unspecified. On systems with POSIX filesystem semantics, the extended part will be filled with zero bytes. Examples -------- >>> data = np.arange(12, dtype='float32') >>> data.resize((3,4)) This example uses a temporary file so that doctest doesn't write files to your directory. You would use a 'normal' filename. >>> from tempfile import mkdtemp >>> import os.path as path >>> filename = path.join(mkdtemp(), 'newfile.dat') Create a memmap with dtype and shape that matches our data: >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) >>> fp memmap([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]], dtype=float32) Write data to memmap array: >>> fp[:] = data[:] >>> fp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]], dtype=float32) >>> fp.filename == path.abspath(filename) True Deletion flushes memory changes to disk before removing the object: >>> del fp Load the memmap and verify data was stored: >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]], dtype=float32) Read-only memmap: >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) >>> fpc.flags.writeable True It's possible to assign to copy-on-write array, but values are only written into the memory copy of the array, and not written to disk: >>> fpc memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]], dtype=float32) >>> fpc[0,:] = 0 >>> fpc memmap([[ 0., 0., 0., 0.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]], dtype=float32) File on disk is unchanged: >>> fpr memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]], dtype=float32) Offset into a memmap: >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) """ __array_priority__ = -100.0 def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, shape=None, order='C'): # Import here to minimize 'import numpy' overhead import mmap import os.path try: mode = mode_equivalents[mode] except KeyError: if mode not in valid_filemodes: raise ValueError("mode must be one of %s" % (valid_filemodes + list(mode_equivalents.keys()))) if hasattr(filename, 'read'): fid = filename own_file = False elif is_pathlib_path(filename): fid = filename.open((mode == 'c' and 'r' or mode)+'b') own_file = True else: fid = open(filename, (mode == 'c' and 'r' or mode)+'b') own_file = True if (mode == 'w+') and shape is None: raise ValueError("shape must be given") fid.seek(0, 2) flen = fid.tell() descr = dtypedescr(dtype) _dbytes = descr.itemsize if shape is None: bytes = flen - offset if (bytes % _dbytes): fid.close() raise ValueError("Size of available data is not a " "multiple of the data-type size.") size = bytes // _dbytes shape = (size,) else: if not isinstance(shape, tuple): shape = (shape,) size = 1 for k in shape: size *= k bytes = long(offset + size*_dbytes) if mode == 'w+' or (mode == 'r+' and flen < bytes): fid.seek(bytes - 1, 0) fid.write(np.compat.asbytes('\0')) fid.flush() if mode == 
'c': acc = mmap.ACCESS_COPY elif mode == 'r': acc = mmap.ACCESS_READ else: acc = mmap.ACCESS_WRITE start = offset - offset % mmap.ALLOCATIONGRANULARITY bytes -= start offset -= start mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, offset=offset, order=order) self._mmap = mm self.offset = offset self.mode = mode if isinstance(filename, basestring): self.filename = os.path.abspath(filename) elif is_pathlib_path(filename): self.filename = filename.resolve() # py3 returns int for TemporaryFile().name elif (hasattr(filename, "name") and isinstance(filename.name, basestring)): self.filename = os.path.abspath(filename.name) # same as memmap copies (e.g. memmap + 1) else: self.filename = None if own_file: fid.close() return self def __array_finalize__(self, obj): if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): self._mmap = obj._mmap self.filename = obj.filename self.offset = obj.offset self.mode = obj.mode else: self._mmap = None self.filename = None self.offset = None self.mode = None def flush(self): """ Write any changes in the array to the file on disk. For further information, see `memmap`. Parameters ---------- None See Also -------- memmap """ if self.base is not None and hasattr(self.base, 'flush'): self.base.flush() def __array_wrap__(self, arr, context=None): arr = super(memmap, self).__array_wrap__(arr, context) # Return a memmap if a memmap was given as the output of the # ufunc. Leave the arr class unchanged if self is not a memmap # to keep original memmap subclasses behavior if self is arr or type(self) is not memmap: return arr # Return scalar instead of 0d memmap, e.g. for np.sum with # axis=None if arr.shape == (): return arr[()] # Return ndarray otherwise return arr.view(np.ndarray) def __getitem__(self, index): res = super(memmap, self).__getitem__(index) if type(res) is memmap and res._mmap is None: return res.view(type=ndarray) return res
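A minimal end-to-end sketch of the workflow the docstring describes: write through a 'w+' map, flush, then re-open part of the file read-only via `offset`. This is illustrative and not part of the numpy source.

import numpy as np
from tempfile import mkdtemp
import os.path as path

fname = path.join(mkdtemp(), 'memmap_demo.dat')
fp = np.memmap(fname, dtype='float32', mode='w+', shape=(4,))
fp[:] = np.arange(4, dtype='float32')
fp.flush()  # push the changes to disk explicitly

# Re-open read-only, skipping the first element; offset is measured in bytes.
tail = np.memmap(fname, dtype='float32', mode='r', offset=4)
print(tail)  # expected: [ 1.  2.  3.]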
bsd-3-clause
6,559,191,744,692,861,000
32.859281
83
0.530728
false
sandeepdsouza93/TensorFlow-15712
tensorflow/python/kernel_tests/cholesky_op_test.py
6
5897
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.tf.Cholesky.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class CholeskyOpTest(tf.test.TestCase): def _verifyCholeskyBase(self, sess, x, chol, verification): chol_np, verification_np = sess.run([chol, verification]) self.assertAllClose(x, verification_np) self.assertShapeEqual(x, chol) # Check that the cholesky is lower triangular, and has positive diagonal # elements. if chol_np.shape[-1] > 0: chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2], chol_np.shape[-1])) for chol_matrix in chol_reshaped: self.assertAllClose(chol_matrix, np.tril(chol_matrix)) self.assertTrue((np.diag(chol_matrix) > 0.0).all()) def _verifyCholesky(self, x): # Verify that LL^T == x. with self.test_session() as sess: chol = tf.cholesky(x) verification = tf.matmul(chol, chol, adjoint_b=True) self._verifyCholeskyBase(sess, x, chol, verification) def testBasic(self): self._verifyCholesky(np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])) def testBatch(self): simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2) self._verifyCholesky(simple_array) self._verifyCholesky(np.vstack((simple_array, simple_array))) odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]]) self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array))) # Generate random positive-definite matrices. matrices = np.random.rand(10, 5, 5) for i in xrange(10): matrices[i] = np.dot(matrices[i].T, matrices[i]) self._verifyCholesky(matrices) def testNonSquareMatrix(self): with self.assertRaises(ValueError): tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]])) with self.assertRaises(ValueError): tf.cholesky( np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]] ])) def testWrongDimensions(self): tensor3 = tf.constant([1., 2.]) with self.assertRaises(ValueError): tf.cholesky(tensor3) with self.assertRaises(ValueError): tf.cholesky(tensor3) def testNotInvertible(self): # The input should be invertible. with self.test_session(): with self.assertRaisesOpError("LLT decomposition was not successful. 
The" " input might not be valid."): # All rows of the matrix below add to zero self._verifyCholesky( np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]])) def testEmpty(self): self._verifyCholesky(np.empty([0, 2, 2])) self._verifyCholesky(np.empty([2, 0, 0])) class CholeskyGradTest(tf.test.TestCase): _backprop_block_size = 32 def getShapes(self, shapeList): return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList) def testSmallMatrices(self): np.random.seed(0) shapes = self.getShapes([1, 2, 10]) self.runFiniteDifferences(shapes) def testOneBlockMatrices(self): np.random.seed(0) shapes = self.getShapes([self._backprop_block_size + 1]) self.runFiniteDifferences( shapes, dtypes=(tf.float32, tf.float64), scalarTest=True) def testTwoBlockMatrixFloat(self): np.random.seed(0) shapes = self.getShapes([2 * self._backprop_block_size + 1]) self.runFiniteDifferences(shapes, dtypes=(tf.float32,), scalarTest=True) def testTwoBlockMatrixDouble(self): np.random.seed(0) shapes = self.getShapes([2 * self._backprop_block_size + 1]) self.runFiniteDifferences(shapes, dtypes=(tf.float64,), scalarTest=True) def runFiniteDifferences(self, shapes, dtypes=(tf.float32, tf.float64), scalarTest=False): with self.test_session(use_gpu=False): for shape in shapes: for batch in False, True: for dtype in dtypes: if not scalarTest: x = tf.constant(np.random.randn(shape[0], shape[1]), dtype) tensor = tf.matmul(x, tf.transpose(x)) / shape[0] else: # This is designed to be a faster test for larger matrices. x = tf.constant(np.random.randn(), dtype) R = tf.constant(np.random.randn(shape[0], shape[1]), dtype) e = tf.mul(R, x) tensor = tf.matmul(e, tf.transpose(e)) / shape[0] # Inner-most matrices in tensor are positive definite. if batch: tensor = tf.tile(tf.expand_dims(tensor, 0), [4, 1, 1]) y = tf.cholesky(tensor) if scalarTest: y = tf.reduce_mean(y) error = tf.test.compute_gradient_error(x, x._shape_as_list(), y, y._shape_as_list()) tf.logging.info("error = %f", error) if dtype == tf.float64: self.assertLess(error, 1e-5) else: self.assertLess(error, 3e-3) if __name__ == "__main__": tf.test.main()
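For reference, the property the op test verifies can be checked directly with NumPy; this standalone sketch mirrors what `_verifyCholeskyBase` asserts for a single matrix.

import numpy as np

x = np.array([[4., -1., 2.], [-1., 6., 0.], [2., 0., 5.]])
chol = np.linalg.cholesky(x)                 # lower-triangular factor L
assert np.allclose(chol, np.tril(chol))      # L is lower triangular
assert (np.diag(chol) > 0.0).all()           # with a positive diagonal
assert np.allclose(chol.dot(chol.T), x)      # and L L^T reconstructs x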
apache-2.0
-1,988,833,859,229,854,500
37.542484
80
0.601831
false
andersx/dftbfit
scripts/calc_charges.py
1
2154
import os import sys import cPickle import numpy as np import openbabel import pybel sys.path.append('/home/andersx/dev/charmm-dftb-py') from sccdftb_api import run_charmm, ATOMS def load_pickle(filename): f = open(filename,"rb") p = cPickle.load(f) f.close() return(p) TYPEVALS = dict() TYPEVALS["H"] = 1 TYPEVALS["C"] = 10 TYPEVALS["N"] = 100 TYPEVALS["O"] = 1000 TYPEVALS["S"] = 10000 def get_typeval(obatom): name = obatom.GetType()[0] return TYPEVALS[name] if __name__ == "__main__": np.set_printoptions(formatter={'float': '{: 0.3f}'.format}, linewidth=1000000) gaussian_mulliken = load_pickle("charges_gaussian.pickle") # NPA gaussian_mulliken = load_pickle("charges_3ob_npa.pickle") # charmm_mulliken = load_pickle("charges_3ob.pickle") charmm_mulliken = load_pickle("charges_test.pickle") path = "xyz_sorted/" listing = os.listdir(path) for filename in sorted(listing): if filename.endswith(".xyz"): logfile = filename.replace(".xyz", ".log") dftb_mulliken = charmm_mulliken[filename] pbe_mulliken = gaussian_mulliken[logfile] # NPA pbe_mulliken = gaussian_mulliken[filename] qdiff = np.array(dftb_mulliken) - np.array(pbe_mulliken) max_qdiff = max(qdiff.min(), qdiff.max(), key=abs) print print "%-30s %7.4f" % (filename, max_qdiff), qdiff print "%39s" % "DFTB3/3OB", np.array(dftb_mulliken) print "%39s" % "PBE/aug-cc-pVTZ", np.array(pbe_mulliken) mol = pybel.readfile("xyz", path + filename).next() for i, atom in enumerate(mol): type_int = 0 print "%-6s bonds to: " % (atom.OBAtom.GetType()), bonds = "" for obatom in openbabel.OBAtomAtomIter(atom.OBAtom): bonds += "%-6s" % obatom.GetType() type_int += get_typeval(obatom) while len(bonds) < 24: bonds += "- " print "%-24s" % bonds, print "%7.3f ID: %05i" % (qdiff[i], type_int)
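Because each neighbour element contributes a distinct power-of-ten weight, the `type_int` fingerprint built above can be decoded back into per-element neighbour counts. The helper below is a sketch added for illustration and is not part of the original script.

def decode_typeval(type_int):
    # Walk the weights from largest to smallest and peel off each element count.
    # Assumes fewer than ten neighbours of any one element, which holds for molecules.
    counts = {}
    for name, value in sorted(TYPEVALS.items(), key=lambda kv: kv[1], reverse=True):
        counts[name], type_int = divmod(type_int, value)
    return counts

# e.g. a carbon bonded to one O, one C and two H atoms: 1000 + 10 + 2*1 = 1012
print(decode_typeval(1012))  # O: 1, C: 1, H: 2 (N and S are 0)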
bsd-2-clause
5,016,058,834,208,815,000
26.615385
82
0.566852
false
davidnmurray/iris
lib/iris/analysis/cartography.py
2
41596
# (C) British Crown Copyright 2010 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Various utilities and numeric transformations relevant to cartography. """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa from collections import namedtuple import copy import warnings import cf_units import numpy as np import numpy.ma as ma import cartopy.img_transform import cartopy.crs as ccrs import iris.analysis import iris.coords import iris.coord_systems import iris.exceptions # This value is used as a fall-back if the cube does not define the earth DEFAULT_SPHERICAL_EARTH_RADIUS = 6367470 # TODO: This should not be necessary, as CF is always in meters DEFAULT_SPHERICAL_EARTH_RADIUS_UNIT = cf_units.Unit('m') # Distance differentials for coordinate systems at specified locations DistanceDifferential = namedtuple('DistanceDifferential', 'dx1 dy1 dx2 dy2') # Partial differentials between coordinate systems PartialDifferential = namedtuple('PartialDifferential', 'dx1 dy1') def wrap_lons(lons, base, period): """ Wrap longitude values into the range between base and base+period. .. testsetup:: import numpy as np from iris.analysis.cartography import wrap_lons For example: >>> print(wrap_lons(np.array([185, 30, -200, 75]), -180, 360)) [-175. 30. 160. 75.] """ # It is important to use 64bit floating precision when changing a floats # numbers range. lons = lons.astype(np.float64) return ((lons - base + period * 2) % period) + base def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat): """ Convert rotated-pole lons and lats to unrotated ones. Example:: lons, lats = unrotate_pole(grid_lons, grid_lats, pole_lon, pole_lat) .. note:: Uses proj.4 to perform the conversion. """ src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, pole_latitude=pole_lat) target_proj = ccrs.Geodetic() res = target_proj.transform_points(x=rotated_lons, y=rotated_lats, src_crs=src_proj) unrotated_lon = res[..., 0] unrotated_lat = res[..., 1] return unrotated_lon, unrotated_lat def rotate_pole(lons, lats, pole_lon, pole_lat): """ Convert arrays of lons and lats to ones on a rotated pole. Example:: grid_lons, grid_lats = rotate_pole(lons, lats, pole_lon, pole_lat) .. note:: Uses proj.4 to perform the conversion. 
""" src_proj = ccrs.Geodetic() target_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, pole_latitude=pole_lat) res = target_proj.transform_points(x=lons, y=lats, src_crs=src_proj) rotated_lon = res[..., 0] rotated_lat = res[..., 1] return rotated_lon, rotated_lat def _get_lat_lon_coords(cube): lat_coords = [coord for coord in cube.coords() if "latitude" in coord.name()] lon_coords = [coord for coord in cube.coords() if "longitude" in coord.name()] if len(lat_coords) > 1 or len(lon_coords) > 1: raise ValueError( "Calling _get_lat_lon_coords() with multiple lat or lon coords" " is currently disallowed") lat_coord = lat_coords[0] lon_coord = lon_coords[0] return (lat_coord, lon_coord) def _xy_range(cube, mode=None): """ Return the x & y range of this Cube. Args: * cube - The cube for which to calculate xy extents. Kwargs: * mode - If the coordinate has bounds, set this to specify the min/max calculation. Set to iris.coords.POINT_MODE or iris.coords.BOUND_MODE. """ # Helpful error if we have an inappropriate CoordSystem cs = cube.coord_system("CoordSystem") cs_valid_types = (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS) if ((cs is not None) and not isinstance(cs, cs_valid_types)): raise ValueError( "Latlon coords cannot be found with {0}.".format(type(cs))) x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") cs = cube.coord_system('CoordSystem') if x_coord.has_bounds() != y_coord.has_bounds(): raise ValueError( 'Cannot get the range of the x and y coordinates if they do ' 'not have the same presence of bounds.') if x_coord.has_bounds(): if mode not in [iris.coords.POINT_MODE, iris.coords.BOUND_MODE]: raise ValueError( 'When the coordinate has bounds, please specify "mode".') _mode = mode else: _mode = iris.coords.POINT_MODE # Get the x and y grids if isinstance(cs, iris.coord_systems.RotatedGeogCS): if _mode == iris.coords.POINT_MODE: x, y = get_xy_grids(cube) else: x, y = get_xy_contiguous_bounded_grids(cube) else: if _mode == iris.coords.POINT_MODE: x = x_coord.points y = y_coord.points else: x = x_coord.bounds y = y_coord.bounds # Get the x and y range if getattr(x_coord, 'circular', False): x_range = (np.min(x), np.min(x) + x_coord.units.modulus) else: x_range = (np.min(x), np.max(x)) y_range = (np.min(y), np.max(y)) return (x_range, y_range) def get_xy_grids(cube): """ Return 2D X and Y points for a given cube. Args: * cube - The cube for which to generate 2D X and Y points. Example:: x, y = get_xy_grids(cube) """ x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") x = x_coord.points y = y_coord.points if x.ndim == y.ndim == 1: # Convert to 2D. x, y = np.meshgrid(x, y) elif x.ndim == y.ndim == 2: # They are already in the correct shape. pass else: raise ValueError("Expected 1D or 2D XY coords") return (x, y) def get_xy_contiguous_bounded_grids(cube): """ Return 2d arrays for x and y bounds. Returns array of shape (n+1, m+1). Example:: xs, ys = get_xy_contiguous_bounded_grids(cube) """ x_coord, y_coord = cube.coord(axis="X"), cube.coord(axis="Y") x = x_coord.contiguous_bounds() y = y_coord.contiguous_bounds() x, y = np.meshgrid(x, y) return (x, y) def _quadrant_area(radian_colat_bounds, radian_lon_bounds, radius_of_earth): """Calculate spherical segment areas. - radian_colat_bounds -- [n,2] array of colatitude bounds (radians) - radian_lon_bounds -- [n,2] array of longitude bounds (radians) - radius_of_earth -- radius of the earth (currently assumed spherical) Area weights are calculated for each lat/lon cell as: .. 
math:: r^2 (lon_1 - lon_0) ( cos(colat_0) - cos(colat_1)) The resulting array will have a shape of *(radian_colat_bounds.shape[0], radian_lon_bounds.shape[0])* The calculations are done at 64 bit precision and the returned array will be of type numpy.float64. """ # ensure pairs of bounds if (radian_colat_bounds.shape[-1] != 2 or radian_lon_bounds.shape[-1] != 2 or radian_colat_bounds.ndim != 2 or radian_lon_bounds.ndim != 2): raise ValueError("Bounds must be [n,2] array") # fill in a new array of areas radius_sqr = radius_of_earth ** 2 radian_colat_64 = radian_colat_bounds.astype(np.float64) radian_lon_64 = radian_lon_bounds.astype(np.float64) ylen = np.cos(radian_colat_64[:, 0]) - np.cos(radian_colat_64[:, 1]) xlen = radian_lon_64[:, 1] - radian_lon_64[:, 0] areas = radius_sqr * np.outer(ylen, xlen) # we use abs because backwards bounds (min > max) give negative areas. return np.abs(areas) def area_weights(cube, normalize=False): """ Returns an array of area weights, with the same dimensions as the cube. This is a 2D lat/lon area weights array, repeated over the non lat/lon dimensions. Args: * cube (:class:`iris.cube.Cube`): The cube to calculate area weights for. Kwargs: * normalize (False/True): If False, weights are grid cell areas. If True, weights are grid cell areas divided by the total grid area. The cube must have coordinates 'latitude' and 'longitude' with bounds. Area weights are calculated for each lat/lon cell as: .. math:: r^2 cos(lat_0) (lon_1 - lon_0) - r^2 cos(lat_1) (lon_1 - lon_0) Currently, only supports a spherical datum. Uses earth radius from the cube, if present and spherical. Defaults to iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS. """ # Get the radius of the earth cs = cube.coord_system("CoordSystem") if isinstance(cs, iris.coord_systems.GeogCS): if cs.inverse_flattening != 0.0: warnings.warn("Assuming spherical earth from ellipsoid.") radius_of_earth = cs.semi_major_axis elif (isinstance(cs, iris.coord_systems.RotatedGeogCS) and (cs.ellipsoid is not None)): if cs.ellipsoid.inverse_flattening != 0.0: warnings.warn("Assuming spherical earth from ellipsoid.") radius_of_earth = cs.ellipsoid.semi_major_axis else: warnings.warn("Using DEFAULT_SPHERICAL_EARTH_RADIUS.") radius_of_earth = DEFAULT_SPHERICAL_EARTH_RADIUS # Get the lon and lat coords and axes try: lat, lon = _get_lat_lon_coords(cube) except IndexError: raise ValueError('Cannot get latitude/longitude ' 'coordinates from cube {!r}.'.format(cube.name())) if lat.ndim > 1: raise iris.exceptions.CoordinateMultiDimError(lat) if lon.ndim > 1: raise iris.exceptions.CoordinateMultiDimError(lon) lat_dim = cube.coord_dims(lat) lat_dim = lat_dim[0] if lat_dim else None lon_dim = cube.coord_dims(lon) lon_dim = lon_dim[0] if lon_dim else None if not (lat.has_bounds() and lon.has_bounds()): msg = ("Coordinates {!r} and {!r} must have bounds to determine " "the area weights.".format(lat.name(), lon.name())) raise ValueError(msg) # Convert from degrees to radians lat = lat.copy() lon = lon.copy() for coord in (lat, lon): if coord.units in (cf_units.Unit('degrees'), cf_units.Unit('radians')): coord.convert_units('radians') else: msg = ("Units of degrees or radians required, coordinate " "{!r} has units: {!r}".format(coord.name(), coord.units.name)) raise ValueError(msg) # Create 2D weights from bounds. # Use the geographical area as the weight for each cell # Convert latitudes to co-latitude. 
I.e from -90 --> +90 to 0 --> pi ll_weights = _quadrant_area(lat.bounds + np.pi / 2., lon.bounds, radius_of_earth) # Normalize the weights if necessary. if normalize: ll_weights /= ll_weights.sum() # Now we create an array of weights for each cell. This process will # handle adding the required extra dimensions and also take care of # the order of dimensions. broadcast_dims = [x for x in (lat_dim, lon_dim) if x is not None] wshape = [] for idim, dim in zip((0, 1), (lat_dim, lon_dim)): if dim is not None: wshape.append(ll_weights.shape[idim]) ll_weights = ll_weights.reshape(wshape) broad_weights = iris.util.broadcast_to_shape(ll_weights, cube.shape, broadcast_dims) return broad_weights def cosine_latitude_weights(cube): """ Returns an array of latitude weights, with the same dimensions as the cube. The weights are the cosine of latitude. These are n-dimensional latitude weights repeated over the dimensions not covered by the latitude coordinate. The cube must have a coordinate with 'latitude' in the name. Out of range values (greater than 90 degrees or less than -90 degrees) will be clipped to the valid range. Weights are calculated for each latitude as: .. math:: w_l = \cos \phi_l Examples: Compute weights suitable for averaging type operations:: from iris.analysis.cartography import cosine_latitude_weights cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) weights = cosine_latitude_weights(cube) Compute weights suitable for EOF analysis (or other covariance type analyses):: import numpy as np from iris.analysis.cartography import cosine_latitude_weights cube = iris.load_cube(iris.sample_data_path('air_temp.pp')) weights = np.sqrt(cosine_latitude_weights(cube)) """ # Find all latitude coordinates, we want one and only one. lat_coords = [coord for coord in cube.coords() if "latitude" in coord.name()] if len(lat_coords) > 1: raise ValueError("Multiple latitude coords are currently disallowed.") try: lat = lat_coords[0] except IndexError: raise ValueError('Cannot get latitude ' 'coordinate from cube {!r}.'.format(cube.name())) # Get the dimension position(s) of the latitude coordinate. lat_dims = cube.coord_dims(lat) # Convert to radians. lat = lat.copy() lat.convert_units('radians') # Compute the weights as the cosine of latitude. In some cases, # particularly when working in 32-bit precision, the latitude values can # extend beyond the allowable range of [-pi/2, pi/2] due to numerical # precision. We first check for genuinely out of range values, and issue a # warning if these are found. Then the cosine is computed and clipped to # the valid range [0, 1]. threshold = np.deg2rad(0.001) # small value for grid resolution if np.any(lat.points < -np.pi / 2. - threshold) or \ np.any(lat.points > np.pi / 2. + threshold): warnings.warn('Out of range latitude values will be ' 'clipped to the valid range.', UserWarning) points = lat.points l_weights = np.cos(points).clip(0., 1.) # Create weights for each grid point. This operation handles adding extra # dimensions and also the order of the dimensions. broadcast_dims = [x for x in lat_dims if x is not None] wshape = [] for idim, dim in enumerate(lat_dims): if dim is not None: wshape.append(l_weights.shape[idim]) l_weights = l_weights.reshape(wshape) broad_weights = iris.util.broadcast_to_shape(l_weights, cube.shape, broadcast_dims) return broad_weights def project(cube, target_proj, nx=None, ny=None): """ Nearest neighbour regrid to a specified target projection. 
Return a new cube that is the result of projecting a cube with 1 or 2 dimensional latitude-longitude coordinates from its coordinate system into a specified projection e.g. Robinson or Polar Stereographic. This function is intended to be used in cases where the cube's coordinates prevent one from directly visualising the data, e.g. when the longitude and latitude are two dimensional and do not make up a regular grid. Args: * cube An instance of :class:`iris.cube.Cube`. * target_proj An instance of the Cartopy Projection class, or an instance of :class:`iris.coord_systems.CoordSystem` from which a projection will be obtained. Kwargs: * nx Desired number of sample points in the x direction for a domain covering the globe. * ny Desired number of sample points in the y direction for a domain covering the globe. Returns: An instance of :class:`iris.cube.Cube` and a list describing the extent of the projection. .. note:: This function assumes global data and will if necessary extrapolate beyond the geographical extent of the source cube using a nearest neighbour approach. nx and ny then include those points which are outside of the target projection. .. note:: Masked arrays are handled by passing their masked status to the resulting nearest neighbour values. If masked, the value in the resulting cube is set to 0. .. warning:: This function uses a nearest neighbour approach rather than any form of linear/non-linear interpolation to determine the data value of each cell in the resulting cube. Consequently it may have an adverse effect on the statistics of the data e.g. the mean and standard deviation will not be preserved. """ try: lat_coord, lon_coord = _get_lat_lon_coords(cube) except IndexError: raise ValueError('Cannot get latitude/longitude ' 'coordinates from cube {!r}.'.format(cube.name())) if lat_coord.coord_system != lon_coord.coord_system: raise ValueError('latitude and longitude coords appear to have ' 'different coordinates systems.') if lon_coord.units != 'degrees': lon_coord = lon_coord.copy() lon_coord.convert_units('degrees') if lat_coord.units != 'degrees': lat_coord = lat_coord.copy() lat_coord.convert_units('degrees') # Determine source coordinate system if lat_coord.coord_system is None: # Assume WGS84 latlon if unspecified warnings.warn('Coordinate system of latitude and longitude ' 'coordinates is not specified. Assuming WGS84 Geodetic.') orig_cs = iris.coord_systems.GeogCS(semi_major_axis=6378137.0, inverse_flattening=298.257223563) else: orig_cs = lat_coord.coord_system # Convert to cartopy crs source_cs = orig_cs.as_cartopy_crs() # Obtain coordinate arrays (ignoring bounds) and convert to 2d # if not already. 
source_x = lon_coord.points source_y = lat_coord.points if source_x.ndim != 2 or source_y.ndim != 2: source_x, source_y = np.meshgrid(source_x, source_y) # Calculate target grid target_cs = None if isinstance(target_proj, iris.coord_systems.CoordSystem): target_cs = target_proj target_proj = target_proj.as_cartopy_projection() # Resolution of new grid if nx is None: nx = source_x.shape[1] if ny is None: ny = source_x.shape[0] target_x, target_y, extent = cartopy.img_transform.mesh_projection( target_proj, nx, ny) # Determine dimension mappings - expect either 1d or 2d if lat_coord.ndim != lon_coord.ndim: raise ValueError("The latitude and longitude coordinates have " "different dimensionality.") latlon_ndim = lat_coord.ndim lon_dims = cube.coord_dims(lon_coord) lat_dims = cube.coord_dims(lat_coord) if latlon_ndim == 1: xdim = lon_dims[0] ydim = lat_dims[0] elif latlon_ndim == 2: if lon_dims != lat_dims: raise ValueError("The 2d latitude and longitude coordinates " "correspond to different dimensions.") # If coords are 2d assume that grid is ordered such that x corresponds # to the last dimension (shortest stride). xdim = lon_dims[1] ydim = lon_dims[0] else: raise ValueError('Expected the latitude and longitude coordinates ' 'to have 1 or 2 dimensions, got {} and ' '{}.'.format(lat_coord.ndim, lon_coord.ndim)) # Create array to store regridded data new_shape = list(cube.shape) new_shape[xdim] = nx new_shape[ydim] = ny new_data = ma.zeros(new_shape, cube.data.dtype) # Create iterators to step through cube data in lat long slices new_shape[xdim] = 1 new_shape[ydim] = 1 index_it = np.ndindex(*new_shape) if lat_coord.ndim == 1 and lon_coord.ndim == 1: slice_it = cube.slices([lat_coord, lon_coord]) elif lat_coord.ndim == 2 and lon_coord.ndim == 2: slice_it = cube.slices(lat_coord) else: raise ValueError('Expected the latitude and longitude coordinates ' 'to have 1 or 2 dimensions, got {} and ' '{}.'.format(lat_coord.ndim, lon_coord.ndim)) # # Mask out points outside of extent in source_cs - disabled until # # a way to specify global/limited extent is agreed upon and code # # is generalised to handle -180 to +180, 0 to 360 and >360 longitudes. # source_desired_xy = source_cs.transform_points(target_proj, # target_x.flatten(), # target_y.flatten()) # if np.any(source_x < 0.0) and np.any(source_x > 180.0): # raise ValueError('Unable to handle range of longitude.') # # This does not work in all cases e.g. lon > 360 # if np.any(source_x > 180.0): # source_desired_x = (source_desired_xy[:, 0].reshape(ny, nx) + # 360.0) % 360.0 # else: # source_desired_x = source_desired_xy[:, 0].reshape(ny, nx) # source_desired_y = source_desired_xy[:, 1].reshape(ny, nx) # outof_extent_points = ((source_desired_x < source_x.min()) | # (source_desired_x > source_x.max()) | # (source_desired_y < source_y.min()) | # (source_desired_y > source_y.max())) # # Make array a mask by default (rather than a single bool) to allow mask # # to be assigned to slices. 
# new_data.mask = np.zeros(new_shape) # Step through cube data, regrid onto desired projection and insert results # in new_data array for index, ll_slice in zip(index_it, slice_it): # Regrid source data onto target grid index = list(index) index[xdim] = slice(None, None) index[ydim] = slice(None, None) new_data[index] = cartopy.img_transform.regrid(ll_slice.data, source_x, source_y, source_cs, target_proj, target_x, target_y) # # Mask out points beyond extent # new_data[index].mask[outof_extent_points] = True # Remove mask if it is unnecessary if not np.any(new_data.mask): new_data = new_data.data # Create new cube new_cube = iris.cube.Cube(new_data) # Add new grid coords x_coord = iris.coords.DimCoord( target_x[0, :], 'projection_x_coordinate', coord_system=copy.copy(target_cs)) y_coord = iris.coords.DimCoord( target_y[:, 0], 'projection_y_coordinate', coord_system=copy.copy(target_cs)) new_cube.add_dim_coord(x_coord, xdim) new_cube.add_dim_coord(y_coord, ydim) # Add resampled lat/lon in original coord system source_desired_xy = source_cs.transform_points(target_proj, target_x.flatten(), target_y.flatten()) new_lon_points = source_desired_xy[:, 0].reshape(ny, nx) new_lat_points = source_desired_xy[:, 1].reshape(ny, nx) new_lon_coord = iris.coords.AuxCoord(new_lon_points, standard_name='longitude', units='degrees', coord_system=orig_cs) new_lat_coord = iris.coords.AuxCoord(new_lat_points, standard_name='latitude', units='degrees', coord_system=orig_cs) new_cube.add_aux_coord(new_lon_coord, [ydim, xdim]) new_cube.add_aux_coord(new_lat_coord, [ydim, xdim]) coords_to_ignore = set() coords_to_ignore.update(cube.coords(contains_dimension=xdim)) coords_to_ignore.update(cube.coords(contains_dimension=ydim)) for coord in cube.dim_coords: if coord not in coords_to_ignore: new_cube.add_dim_coord(coord.copy(), cube.coord_dims(coord)) for coord in cube.aux_coords: if coord not in coords_to_ignore: new_cube.add_aux_coord(coord.copy(), cube.coord_dims(coord)) discarded_coords = coords_to_ignore.difference([lat_coord, lon_coord]) if discarded_coords: warnings.warn('Discarding coordinates that share dimensions with ' '{} and {}: {}'.format(lat_coord.name(), lon_coord.name(), [coord.name() for coord in discarded_coords])) # TODO handle derived coords/aux_factories # Copy metadata across new_cube.metadata = cube.metadata return new_cube, extent def _transform_xy(crs_from, x, y, crs_to): """ Shorthand function to transform 2d points between coordinate reference systems. Args: * crs_from, crs_to (:class:`cartopy.crs.Projection`): The coordinate reference systems. * x, y (arrays): point locations defined in 'crs_from'. Returns: x, y : Arrays of locations defined in 'crs_to'. """ pts = crs_to.transform_points(crs_from, x, y) return pts[..., 0], pts[..., 1] def _inter_crs_differentials(crs1, x, y, crs2): """ Calculate coordinate partial differentials from crs1 to crs2. Returns dx2/dx1, dy2/dx1, dx2/dy1 and dy2/dy1, at given locations. Args: * crs1, crs2 (`cartopy.crs.Projection`): The coordinate systems, "from" and "to". * x, y (array): Point locations defined in 'crs1'. Returns: (dx2/dx1, dy2/dx1, dx2/dy1, dy2/dy1) at given locations. Each element of this tuple will be the same shape as the 'x' and 'y' arrays and will be the partial differentials between the two systems. """ # Get locations in target crs. crs2_x, crs2_y = _transform_xy(crs1, x, y, crs2) # Define small x-deltas in the source crs. VECTOR_DELTAS_FACTOR = 360000.0 # Empirical factor to obtain small delta. 
delta_x = (crs1.x_limits[1] - crs1.x_limits[0]) / VECTOR_DELTAS_FACTOR delta_x = delta_x * np.ones(x.shape) eps = 1e-9 # Reverse deltas where we would otherwise step outside the valid range. invalid_dx = x + delta_x > crs1.x_limits[1] - eps delta_x[invalid_dx] = -delta_x[invalid_dx] # Calculate the transformed point with x = x + dx. crs2_x2, crs2_y2 = _transform_xy(crs1, x + delta_x, y, crs2) # Form differentials wrt dx. dx2_dx = (crs2_x2 - crs2_x) / delta_x dy2_dx = (crs2_y2 - crs2_y) / delta_x # Define small y-deltas in the source crs. delta_y = (crs1.y_limits[1] - crs1.y_limits[0]) / VECTOR_DELTAS_FACTOR delta_y = delta_y * np.ones(y.shape) # Reverse deltas where we would otherwise step outside the valid range. invalid_dy = y + delta_y > crs1.y_limits[1] - eps delta_y[invalid_dy] = -delta_y[invalid_dy] # Calculate the transformed point with y = y + dy. crs2_x2, crs2_y2 = _transform_xy(crs1, x, y + delta_y, crs2) # Form differentials wrt dy. dx2_dy = (crs2_x2 - crs2_x) / delta_y dy2_dy = (crs2_y2 - crs2_y) / delta_y return dx2_dx, dy2_dx, dx2_dy, dy2_dy def _crs_distance_differentials(crs, x, y): """ Calculate d(distance) / d(x) and ... / d(y) for a coordinate reference system at specified locations. Args: * crs (:class:`cartopy.crs.Projection`): The coordinate reference system. * x, y (array): Locations at which to calculate the differentials, defined in 'crs' coordinate reference system. Returns: (abs(ds/dx), abs(ds/dy)). Numerically approximated partial differentials, i.e. scaling factors between changes in distance and changes in coordinate values. """ # Make a true-latlon coordinate system for distance calculations. crs_latlon = ccrs.Geodetic(globe=ccrs.Globe(ellipse='sphere')) # Transform points to true-latlon (just to get the true latitudes). _, true_lat = _transform_xy(crs, x, y, crs_latlon) # Get coordinate differentials w.r.t. true-latlon. dlon_dx, dlat_dx, dlon_dy, dlat_dy = \ _inter_crs_differentials(crs, x, y, crs_latlon) # Calculate effective scalings of X and Y coordinates. lat_factor = np.cos(np.deg2rad(true_lat))**2 ds_dx = np.sqrt(dlat_dx * dlat_dx + dlon_dx * dlon_dx * lat_factor) ds_dy = np.sqrt(dlat_dy * dlat_dy + dlon_dy * dlon_dy * lat_factor) return ds_dx, ds_dy def _transform_distance_vectors(u_dist, v_dist, ds, dx2, dy2): """ Transform distance vectors from one coordinate reference system to another, preserving magnitude and physical direction. Args: * u_dist, v_dist (array): Components of each vector along the x and y directions of the source crs at each location. * ds (`DistanceDifferential`): Distance differentials for the source and the target crs at specified locations. * dx2, dy2 (`PartialDifferential`): Partial differentials from the source to the target crs. Returns: (ut_dist, vt_dist): Tuple of arrays containing the vector components along the x and y directions of the target crs at each location. """ # Scale input distance vectors --> source-coordinate differentials. u1, v1 = u_dist / ds.dx1, v_dist / ds.dy1 # Transform vectors into the target system. u2 = dx2.dx1 * u1 + dx2.dy1 * v1 v2 = dy2.dx1 * u1 + dy2.dy1 * v1 # Rescale output coordinate vectors --> target distance vectors. u2_dist, v2_dist = u2 * ds.dx2, v2 * ds.dy2 return u2_dist, v2_dist def _transform_distance_vectors_tolerance_mask(src_crs, x, y, tgt_crs, ds, dx2, dy2): """ Return a mask that can be applied to data array to mask elements where the magnitude of vectors are not preserved due to numerical errors introduced by the tranformation between coordinate systems. 
Args: * src_crs (`cartopy.crs.Projection`): The source coordinate reference systems. * x, y (array): Locations of each vector defined in 'src_crs'. * tgt_crs (`cartopy.crs.Projection`): The target coordinate reference systems. * ds (`DistanceDifferential`): Distance differentials for src_crs and tgt_crs at specified locations * dx2, dy2 (`PartialDifferential`): Partial differentials from src_crs to tgt_crs. Returns: 2d boolean array that is the same shape as x and y. """ if x.shape != y.shape: raise ValueError('Arrays do not have matching shapes. ' 'x.shape is {}, y.shape is {}.'.format(x.shape, y.shape)) ones = np.ones(x.shape) zeros = np.zeros(x.shape) u_one_t, v_zero_t = _transform_distance_vectors(ones, zeros, ds, dx2, dy2) u_zero_t, v_one_t = _transform_distance_vectors(zeros, ones, ds, dx2, dy2) # Squared magnitudes should be equal to one within acceptable tolerance. # A value of atol=2e-3 is used, which corresponds to a change in magnitude # of approximately 0.1%. sqmag_1_0 = u_one_t**2 + v_zero_t**2 sqmag_0_1 = u_zero_t**2 + v_one_t**2 mask = np.logical_not( np.logical_and(np.isclose(sqmag_1_0, ones, atol=2e-3), np.isclose(sqmag_0_1, ones, atol=2e-3))) return mask def rotate_winds(u_cube, v_cube, target_cs): """ Transform wind vectors to a different coordinate system. The input cubes contain U and V components parallel to the local X and Y directions of the input grid at each point. The output cubes contain the same winds, at the same locations, but relative to the grid directions of a different coordinate system. Thus in vector terms, the magnitudes will always be the same, but the angles can be different. The outputs retain the original horizontal dimension coordinates, but also have two 2-dimensional auxiliary coordinates containing the X and Y locations in the target coordinate system. Args: * u_cube An instance of :class:`iris.cube.Cube` that contains the x-component of the vector. * v_cube An instance of :class:`iris.cube.Cube` that contains the y-component of the vector. * target_cs An instance of :class:`iris.coord_systems.CoordSystem` that specifies the new grid directions. Returns: A (u', v') tuple of :class:`iris.cube.Cube` instances that are the u and v components in the requested target coordinate system. The units are the same as the inputs. .. note:: The U and V values relate to distance, with units such as 'm s-1'. These are not the same as coordinate vectors, which transform in a different manner. .. note:: The names of the output cubes are those of the inputs, prefixed with 'transformed\_' (e.g. 'transformed_x_wind'). .. warning:: Conversion between rotated-pole and non-rotated systems can be expressed analytically. However, this function always uses a numerical approach. In locations where this numerical approach does not preserve magnitude to an accuracy of 0.1%, the corresponding elements of the returned cubes will be masked. """ # Check u_cube and v_cube have the same shape. We iterate through # the u and v cube slices which relies on the shapes matching. if u_cube.shape != v_cube.shape: msg = 'Expected u and v cubes to have the same shape. ' \ 'u cube has shape {}, v cube has shape {}.' raise ValueError(msg.format(u_cube.shape, v_cube.shape)) # Check the u_cube and v_cube have the same x and y coords. msg = 'Coordinates differ between u and v cubes. Coordinate {!r} from ' \ 'u cube does not equal coordinate {!r} from v cube.' 
if u_cube.coord(axis='x') != v_cube.coord(axis='x'): raise ValueError(msg.format(u_cube.coord(axis='x').name(), v_cube.coord(axis='x').name())) if u_cube.coord(axis='y') != v_cube.coord(axis='y'): raise ValueError(msg.format(u_cube.coord(axis='y').name(), v_cube.coord(axis='y').name())) # Check x and y coords have the same coordinate system. x_coord = u_cube.coord(axis='x') y_coord = u_cube.coord(axis='y') if x_coord.coord_system != y_coord.coord_system: msg = "Coordinate systems of x and y coordinates differ. " \ "Coordinate {!r} has a coord system of {!r}, but coordinate " \ "{!r} has a coord system of {!r}." raise ValueError(msg.format(x_coord.name(), x_coord.coord_system, y_coord.name(), y_coord.coord_system)) # Convert from iris coord systems to cartopy CRSs to access # transform functionality. Use projection as cartopy # transform_vectors relies on x_limits and y_limits. if x_coord.coord_system is not None: src_crs = x_coord.coord_system.as_cartopy_projection() else: # Default to Geodetic (but actually use PlateCarree as a # projection is needed). src_crs = ccrs.PlateCarree() target_crs = target_cs.as_cartopy_projection() # Check the number of dimensions of the x and y coords is the same. # Subsequent logic assumes either both 1d or both 2d. x = x_coord.points y = y_coord.points if x.ndim != y.ndim or x.ndim > 2 or y.ndim > 2: msg = 'x and y coordinates must have the same number of dimensions ' \ 'and be either 1D or 2D. The number of dimensions are {} and ' \ '{}, respectively.'.format(x.ndim, y.ndim) raise ValueError(msg) # Check the dimension mappings match between u_cube and v_cube. if u_cube.coord_dims(x_coord) != v_cube.coord_dims(x_coord): raise ValueError('Dimension mapping of x coordinate differs ' 'between u and v cubes.') if u_cube.coord_dims(y_coord) != v_cube.coord_dims(y_coord): raise ValueError('Dimension mapping of y coordinate differs ' 'between u and v cubes.') x_dims = u_cube.coord_dims(x_coord) y_dims = u_cube.coord_dims(y_coord) # Convert points to 2D, if not already, and determine dims. if x.ndim == y.ndim == 1: x, y = np.meshgrid(x, y) dims = (y_dims[0], x_dims[0]) else: dims = x_dims # Transpose x, y 2d arrays to match the order in cube's data # array so that x, y and the sliced data all line up. if dims[0] > dims[1]: x = x.transpose() y = y.transpose() # Create resulting cubes. ut_cube = u_cube.copy() vt_cube = v_cube.copy() ut_cube.rename('transformed_{}'.format(u_cube.name())) vt_cube.rename('transformed_{}'.format(v_cube.name())) # Get distance scalings for source crs. ds_dx1, ds_dy1 = _crs_distance_differentials(src_crs, x, y) # Get distance scalings for target crs. x2, y2 = _transform_xy(src_crs, x, y, target_crs) ds_dx2, ds_dy2 = _crs_distance_differentials(target_crs, x2, y2) ds = DistanceDifferential(ds_dx1, ds_dy1, ds_dx2, ds_dy2) # Calculate coordinate partial differentials from source crs to target crs. dx2_dx1, dy2_dx1, dx2_dy1, dy2_dy1 = _inter_crs_differentials(src_crs, x, y, target_crs) dx2 = PartialDifferential(dx2_dx1, dx2_dy1) dy2 = PartialDifferential(dy2_dx1, dy2_dy1) # Calculate mask based on preservation of magnitude. mask = _transform_distance_vectors_tolerance_mask(src_crs, x, y, target_crs, ds, dx2, dy2) apply_mask = mask.any() if apply_mask: # Make masked arrays to accept masking. ut_cube.data = ma.asanyarray(ut_cube.data) vt_cube.data = ma.asanyarray(vt_cube.data) # Project vectors with u, v components one horiz slice at a time and # insert into the resulting cubes. 
shape = list(u_cube.shape) for dim in dims: shape[dim] = 1 ndindex = np.ndindex(*shape) for index in ndindex: index = list(index) for dim in dims: index[dim] = slice(None, None) index = tuple(index) u = u_cube.data[index] v = v_cube.data[index] ut, vt = _transform_distance_vectors(u, v, ds, dx2, dy2) if apply_mask: ut = ma.asanyarray(ut) ut[mask] = ma.masked vt = ma.asanyarray(vt) vt[mask] = ma.masked ut_cube.data[index] = ut vt_cube.data[index] = vt # Calculate new coords of locations in target coordinate system. xyz_tran = target_crs.transform_points(src_crs, x, y) xt = xyz_tran[..., 0].reshape(x.shape) yt = xyz_tran[..., 1].reshape(y.shape) # Transpose xt, yt 2d arrays to match the dim order # of the original x an y arrays - i.e. undo the earlier # transpose (if applied). if dims[0] > dims[1]: xt = xt.transpose() yt = yt.transpose() xt_coord = iris.coords.AuxCoord(xt, standard_name='projection_x_coordinate', coord_system=target_cs) yt_coord = iris.coords.AuxCoord(yt, standard_name='projection_y_coordinate', coord_system=target_cs) # Set units based on coord_system. if isinstance(target_cs, (iris.coord_systems.GeogCS, iris.coord_systems.RotatedGeogCS)): xt_coord.units = yt_coord.units = 'degrees' else: xt_coord.units = yt_coord.units = 'm' ut_cube.add_aux_coord(xt_coord, dims) ut_cube.add_aux_coord(yt_coord, dims) vt_cube.add_aux_coord(xt_coord.copy(), dims) vt_cube.add_aux_coord(yt_coord.copy(), dims) return ut_cube, vt_cube
gpl-3.0
7,761,359,626,865,688,000
36.406475
79
0.608376
false
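The cartography helpers in the record above approximate the Jacobian of a coordinate transform by stepping each coordinate by a small delta. Below is a minimal numpy sketch of that finite-difference idea, using an analytic 30-degree rotation in place of a cartopy CRS pair, so it needs no cartopy and the exact answer is known; it is illustrative only and is not the iris implementation.

import numpy as np

def transform(x, y, theta=np.deg2rad(30.0)):
    # Stand-in for crs_to.transform_points: rotate points by theta.
    return (np.cos(theta) * x - np.sin(theta) * y,
            np.sin(theta) * x + np.cos(theta) * y)

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 0.5, 1.0])
delta = 1e-6

x2, y2 = transform(x, y)
# d(x2)/dx and d(y2)/dx from a small step in x.
x2_dx, y2_dx = transform(x + delta, y)
dx2_dx = (x2_dx - x2) / delta
dy2_dx = (y2_dx - y2) / delta
# d(x2)/dy and d(y2)/dy from a small step in y.
x2_dy, y2_dy = transform(x, y + delta)
dx2_dy = (x2_dy - x2) / delta
dy2_dy = (y2_dy - y2) / delta

# The recovered partial differentials approximate the exact rotation matrix.
print(dx2_dx, dy2_dx)   # ~cos(30 deg), ~sin(30 deg)
print(dx2_dy, dy2_dy)   # ~-sin(30 deg), ~cos(30 deg)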
ggreco77/GWsky
GWsky/moon.py
1
3795
from __future__ import print_function import astropy from astropy import units as u from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_moon, get_sun from astropy.time import TimeDelta from astropy.time import Time import numpy as np from .config_values import UserValues from .aladinSAMP import AladinViaSAMP, AladinScriptCommands from .utils import Utils class Moon(object): def __init__(self): """Getting user-values from config_values module.""" self.user = UserValues() self.aladin = AladinScriptCommands() self.latitude = self.user.get_latitude() self.longitude = self.user.get_longitude() self.altitude = self.user.get_altitude() self.obs_time = Time(self.user.get_obs_time()) self.dt = TimeDelta(7200.0, format='sec') self.step = 0 self.end_step = 11 def get_location(self): observatory = astropy.coordinates.EarthLocation(lat = self.latitude*u.deg, lon = self.longitude*u.deg, height = self.altitude*u.m) return observatory def get_time(self): self.time = Time(self.obs_time) return self.time def moon_on_sky(self): self.get_location() #self.get.time() self.sky_position() def steps(self): """Moon position in step of one hour for an input sky position (ra, dec). 10 steps are performed: step <10; dt = 1h.""" #obs_time = Time(self.obs_time) self.aladin.draw_newtool("Moon") self.time = Time(self.obs_time) self.observatory = self.get_location() while self.step < self.end_step: time_update = self.time + self.step*self.dt position_moon = get_moon(time_update, self.observatory) #val = self.airmass(ra, dec, self.altitude, self.longitude, self.altitude, # time_input) #self.airmass_list.append(val) #self.time_list.append(str(time_input)) self.step+=1 self.aladin.draw_string(position_moon.ra, position_moon.dec, "MOON"+ "-->" + str(time_update.isot)) #print str(time_update.isot) #print position_moon.ra, position_moon.ra def illumination(self): """Return the fraction of the moon illumination. Modified version of astroplan project.""" sun = get_sun(self.obs_time) observatory = self.get_location() moon = get_moon(self.obs_time, observatory) #print moon elongation = sun.separation(moon) i = np.arctan2(sun.distance*np.sin(elongation), moon.distance - sun.distance*np.cos(elongation)) k = (1 + np.cos(i))/2.0 return round(k.value, 2) def from_fov(self, ra_fov_center, dec_fov_center): """ Return the Moon position over the sky.""" observatory = self.get_location() moon = get_moon(self.obs_time, observatory) distance = Utils.separation(ra_fov_center, dec_fov_center, moon.ra, moon.dec) #print moon.ra*u.deg, moon.dec*u.deg return distance.deg def sky_position(self): """Plot the Moon position on the Aladin plane.""" time = Time(self.obs_time) observatory = self.get_location() position_moon = get_moon(time, observatory) illumination = self.illumination() #self.aladin.draw_string(position_moon.ra, position_moon.dec, "MOON position") self.aladin.draw_moon(position_moon.ra, position_moon.dec, illumination)
bsd-2-clause
1,740,091,642,959,692,000
30.890756
111
0.581818
false
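Moon.illumination() in the record above combines astropy's get_sun/get_moon ephemerides into an illuminated fraction. A standalone sketch of the same calculation follows, with a hard-coded example site and epoch (the coordinates and time below are arbitrary placeholders), so it runs without the GWsky configuration files or the SAMP/Aladin bridge.

import numpy as np
from astropy import units as u
from astropy.coordinates import EarthLocation, get_moon, get_sun
from astropy.time import Time

obs_time = Time('2017-08-17 12:41:04')                     # example epoch
site = EarthLocation(lat=43.7 * u.deg, lon=10.4 * u.deg,   # example site
                     height=10.0 * u.m)

sun = get_sun(obs_time)
moon = get_moon(obs_time, site)

# Phase angle from the Sun-Moon elongation, then the illuminated fraction
# k = (1 + cos(i)) / 2, as in Moon.illumination().
elongation = sun.separation(moon)
i = np.arctan2(sun.distance * np.sin(elongation),
               moon.distance - sun.distance * np.cos(elongation))
k = (1 + np.cos(i)) / 2.0
print(round(k.value, 2))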
kgsn1763/deep-learning-from-scratch
common/gradient.py
1
1175
#!/usr/bin/env python
# coding: utf-8
import numpy as np


def _numerical_gradient_1d(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2*h)

        x[idx] = tmp_val  # restore the original value

    return grad


def numerical_gradient_2d(f, X):
    if X.ndim == 1:
        return _numerical_gradient_1d(f, X)
    else:
        grad = np.zeros_like(X)

        for idx, x in enumerate(X):
            grad[idx] = _numerical_gradient_1d(f, x)

        return grad


def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2*h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad
mit
7,474,601,776,317,289,000
19.927273
68
0.49609
false
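A quick sanity check of numerical_gradient above against an analytic gradient, assuming the file is importable as common.gradient (its path in the repository): for f(x) = x0**2 + x1**2 the exact gradient at (3, 4) is (6, 8).

import numpy as np
from common.gradient import numerical_gradient

def f(x):
    # Simple quadratic with a known gradient 2*x.
    return np.sum(x ** 2)

grad = numerical_gradient(f, np.array([3.0, 4.0]))
print(grad)  # approximately [6. 8.]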
HyperloopTeam/FullOpenMDAO
lib/python2.7/site-packages/openmdao.lib-0.13.0-py2.7.egg/openmdao/lib/datatypes/domain/flow.py
1
24856
import copy import numpy VERTEX = 'Vertex' CELL_CENTER = 'CellCenter' _GRID_LOCATIONS = (VERTEX, CELL_CENTER) class FlowSolution(object): """ Contains solution variables for a :class:`Zone`. All variables have the same shape and grid location. """ def __init__(self): self._grid_location = VERTEX self._ghosts = (0, 0, 0, 0, 0, 0) self._arrays = [] self._vectors = [] def _get_grid_location(self): return self._grid_location def _set_grid_location(self, loc): if loc not in _GRID_LOCATIONS: raise ValueError('%r is not a valid grid location' % loc) self._grid_location = loc grid_location = property(_get_grid_location, _set_grid_location, doc='Position at which data is located;' ' must be one of %s' % (_GRID_LOCATIONS,)) def _get_ghosts(self): return self._ghosts def _set_ghosts(self, ghosts): if len(ghosts) < 2*len(self.shape): raise ValueError('ghosts must be a %d-element array' % (2*len(self.shape))) for i in ghosts: if i < 0: raise ValueError('All ghost values must be >= 0') self._ghosts = ghosts for vector in self._vectors: vector.ghosts = ghosts ghosts = property(_get_ghosts, _set_ghosts, doc='Number of ghost cells for each index direction.') @property def arrays(self): """ List of scalar data arrays. """ return self._arrays @property def vectors(self): """ List of vector data. """ return self._vectors @property def shape(self): """ Data index limits, not including 'ghost/rind' planes. """ ijk = self.real_shape if len(ijk) < 1: return () ghosts = self._ghosts imax = ijk[0] - (ghosts[0] + ghosts[1]) if len(ijk) < 2: return (imax,) jmax = ijk[1] - (ghosts[2] + ghosts[3]) if len(ijk) < 3: return (imax, jmax) kmax = ijk[2] - (ghosts[4] + ghosts[5]) return (imax, jmax, kmax) @property def real_shape(self): """ Data index limits, including any 'ghost/rind' planes. """ if self._vectors: return self._vectors[0].real_shape elif self._arrays: return self._arrays[0].shape else: return () def add_array(self, name, array): """ Add a :class:`numpy.ndarray` of scalar data and bind to `name`. Returns the added array. name: string Name for the added array. array: :class:`numpy.ndarray` Scalar data. """ if hasattr(self, name): raise ValueError('name %r is already bound' % name) if self._arrays: ijk = self._arrays[0].shape elif self._vectors: ijk = self._vectors[0].shape else: ijk = () if ijk and array.shape != ijk: raise ValueError('array shape %s != existing shape %s' % (array.shape, ijk)) setattr(self, name, array) self._arrays.append(array) return array def add_vector(self, name, vector): """ Add a :class:`Vector` and bind to `name`. Returns the added vector. name: string Name for the added array. vector: :class:`Vector` Vector data. """ if hasattr(self, name): raise ValueError('name %r is already bound' % name) shape = self.real_shape if shape and vector.real_shape != shape: raise ValueError('vector real shape %s != existing real shape %s' % (vector.real_shape, shape)) setattr(self, name, vector) self._vectors.append(vector) vector.ghosts = self.ghosts return vector def copy(self): """ Returns a deep copy of self. """ return copy.deepcopy(self) def _copy_scalars(self, other): """ Copy scalars from `other` to self. """ for name, val in other.__dict__.items(): if not hasattr(self, name): setattr(self, name, val) def is_equivalent(self, other, logger, tolerance=0.): """ Test if self and `other` are equivalent. other: :class:`FlowSolution` The flowfield to check against. logger: :class:`Logger` or None Used to log debug messages that will indicate what, if anything, is not equivalent. 
tolerance: float The maximum relative difference in array values to be considered equivalent. """ if not isinstance(other, FlowSolution): logger.debug('other is not a FlowSolution object.') return False if other.grid_location != self.grid_location: logger.debug('grid locations are not equal: %s vs. %s.', other.grid_location, self.grid_location) return False if other.ghosts != self.ghosts: logger.debug('flow ghost cell counts are not equal: %s vs. %s.', other.ghosts, self.ghosts) return False for arr in self._arrays: name = self.name_of_obj(arr) try: other_arr = getattr(other, name) except AttributeError: logger.debug('other is missing array %r', name) return False if tolerance > 0.: if not numpy.allclose(other_arr, arr, tolerance, tolerance): logger.debug("%s values are not 'close'.", name) return False else: if (other_arr != arr).any(): logger.debug('%s values are not equal.', name) return False for vector in self._vectors: name = self.name_of_obj(vector) try: other_vector = getattr(other, name) except AttributeError: logger.debug('other is missing vector %r', name) return False if not vector.is_equivalent(other_vector, name, logger, tolerance): return False # TODO: check scalars return True def extract(self, imin, imax, jmin=None, jmax=None, kmin=None, kmax=None, ghosts=None): """ Construct a new :class:`FlowSolution` from data extracted from the specified region. imin, imax, jmin, jmax, kmin, kmax: int Specifies the region to extract neglecting ghost/rind planes. Negative values are relative to the size in that dimension, so -1 refers to the last element. For 2D zones omit kmin and kmax. For 1D zones omit jmin, jmax, kmin, and kmax. ghosts: int[] Number of ghost/rind planes for the new zone. If ``None`` the existing specification is used. """ ghosts = ghosts or self._ghosts i = len(self.shape) if i == 3: if jmin is None or jmax is None or kmin is None or kmax is None: raise ValueError('3D extract requires jmin, jmax, kmin, and kmax') return self._extract_3d(imin, imax, jmin, jmax, kmin, kmax, ghosts) elif i == 2: if kmin is not None or kmax is not None: raise ValueError('2D extract undefined for kmin or kmax') if jmin is None or jmax is None: raise ValueError('2D extract requires jmin and jmax') return self._extract_2d(imin, imax, jmin, jmax, ghosts) elif i == 1: if kmin is not None or kmax is not None or \ jmin is not None or jmax is not None: raise ValueError('1D extract undefined for jmin, jmax, kmin, or kmax') return self._extract_1d(imin, imax, ghosts) else: raise RuntimeError('FlowSolution is empty!') def _extract_3d(self, imin, imax, jmin, jmax, kmin, kmax, new_ghosts): """ 3D (index space) extraction. """ imn, imx, jmn, jmx, kmn, kmx = imin, imax, jmin, jmax, kmin, kmax ghosts = self._ghosts # Support end-relative indexing and adjust for existing ghostplanes. flow_imax, flow_jmax, flow_kmax = self.shape if imin < 0: imin += flow_imax imin += ghosts[0] if imax < 0: imax += flow_imax imax += ghosts[0] if jmin < 0: jmin += flow_jmax jmin += ghosts[2] if jmax < 0: jmax += flow_jmax jmax += ghosts[2] if kmin < 0: kmin += flow_kmax kmin += ghosts[4] if kmax < 0: kmax += flow_kmax kmax += ghosts[4] # Adjust for new ghost/rind planes. imin -= new_ghosts[0] imax += new_ghosts[1] jmin -= new_ghosts[2] jmax += new_ghosts[3] kmin -= new_ghosts[4] kmax += new_ghosts[5] # Check limits. 
if imin < 0 or imax > flow_imax+ghosts[1] or \ jmin < 0 or jmax > flow_jmax+ghosts[3] or \ kmin < 0 or kmax > flow_kmax+ghosts[5]: region = (imin, imax, jmin, jmax, kmin, kmax) original = (0, flow_imax+ghosts[1], 0, flow_jmax+ghosts[3], 0, flow_kmax+ghosts[5]) raise ValueError('Extraction region %s exceeds original %s' % (region, original)) # Extract. flow = FlowSolution() for arr in self._arrays: flow.add_array(self.name_of_obj(arr), arr[imin:imax+1, jmin:jmax+1, kmin:kmax+1]) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extract(imn, imx, jmn, jmx, kmn, kmx, ghosts=new_ghosts)) flow.grid_location = self.grid_location flow.ghosts = new_ghosts flow._copy_scalars(self) return flow def _extract_2d(self, imin, imax, jmin, jmax, new_ghosts): """ 2D (index space) extraction. """ imn, imx, jmn, jmx, = imin, imax, jmin, jmax ghosts = self._ghosts # Support end-relative indexing and adjust for existing ghost planes. flow_imax, flow_jmax = self.shape if imin < 0: imin += flow_imax imin += ghosts[0] if imax < 0: imax += flow_imax imax += ghosts[0] if jmin < 0: jmin += flow_jmax jmin += ghosts[2] if jmax < 0: jmax += flow_jmax jmax += ghosts[2] # Adjust for new ghost/rind planes. imin -= new_ghosts[0] imax += new_ghosts[1] jmin -= new_ghosts[2] jmax += new_ghosts[3] # Check limits. if imin < 0 or imax > flow_imax+ghosts[1] or \ jmin < 0 or jmax > flow_jmax+ghosts[3]: region = (imin, imax, jmin, jmax) original = (0, flow_imax+ghosts[1], 0, flow_jmax+ghosts[3]) raise ValueError('Extraction region %s exceeds original %s' % (region, original)) # Extract. flow = FlowSolution() for arr in self._arrays: flow.add_array(self.name_of_obj(arr), arr[imin:imax+1, jmin:jmax+1]) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extract(imn, imx, jmn, jmx, ghosts=new_ghosts)) flow.grid_location = self.grid_location flow.ghosts = new_ghosts flow._copy_scalars(self) return flow def _extract_1d(self, imin, imax, new_ghosts): """ 1D (index space) extraction. """ imn, imx = imin, imax ghosts = self._ghosts # Support end-relative indexing and adjust for existing ghost planes. flow_imax, = self.shape if imin < 0: imin += flow_imax imin += ghosts[0] if imax < 0: imax += flow_imax imax += ghosts[0] # Adjust for new ghost/rind planes. imin -= new_ghosts[0] imax += new_ghosts[1] # Check limits. if imin < 0 or imax > flow_imax+ghosts[1]: region = (imin, imax) original = (0, flow_imax+ghosts[1]) raise ValueError('Extraction region %s exceeds original %s' % (region, original)) # Extract. flow = FlowSolution() for arr in self._arrays: flow.add_array(self.name_of_obj(arr), arr[imin:imax+1]) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extract(imn, imx, ghosts=new_ghosts)) flow.grid_location = self.grid_location flow.ghosts = new_ghosts flow._copy_scalars(self) return flow def extend(self, axis, delta, npoints): """ Construct a new :class:`FlowSolution` by replication. The existing ghosts/rind planes specification is retained. axis: 'i', 'j', or 'k' Index axis to extend. delta: float. Direction. A negative value adds points before the current zero-index of `axis`. npoints: int > 0 Number of points to add in `axis` dimension. 
""" if not delta: raise ValueError('delta must be non-zero') if npoints < 1: raise ValueError('npoints must be >= 1') i = len(self.shape) if i == 3: if axis not in ('i', 'j', 'k'): raise ValueError('axis must be i, j, or k') return self._extend_3d(axis, delta, npoints) elif i == 2: if axis not in ('i', 'j'): raise ValueError('axis must be i or j') return self._extend_2d(axis, delta, npoints) elif i == 1: if axis != 'i': raise ValueError('axis must be i') return self._extend_1d(delta, npoints) else: raise RuntimeError('FlowSolution is empty!') def _extend_3d(self, axis, delta, npoints): """ 3D (index space) extension. """ imax, jmax, kmax = self.real_shape if axis == 'i': new_shape = (imax + npoints, jmax, kmax) indx = imax if delta > 0 else npoints elif axis == 'j': new_shape = (imax, jmax + npoints, kmax) indx = jmax if delta > 0 else npoints else: new_shape = (imax, jmax, kmax + npoints) indx = kmax if delta > 0 else npoints flow = FlowSolution() for arr in self._arrays: new_arr = numpy.zeros(new_shape) if axis == 'i': if delta > 0: new_arr[0:indx, :, :] = arr for i in range(npoints): new_arr[indx+i, :, :] = arr[-1, :, :] else: new_arr[indx:, :, :] = arr for i in range(npoints): new_arr[i, :, :] = arr[0, :, :] elif axis == 'j': if delta > 0: new_arr[:, 0:indx, :] = arr for j in range(npoints): new_arr[:, indx+j, :] = arr[:, -1, :] else: new_arr[:, indx:, :] = arr for j in range(npoints): new_arr[:, j, :] = arr[:, 0, :] else: if delta > 0: new_arr[:, :, 0:indx] = arr for k in range(npoints): new_arr[:, :, indx+k] = arr[:, :, -1] else: new_arr[:, :, indx:] = arr for k in range(npoints): new_arr[:, :, k] = arr[:, :, 0] flow.add_array(self.name_of_obj(arr), new_arr) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extend(axis, delta, npoints)) flow.grid_location = self.grid_location flow.ghosts = copy.copy(self._ghosts) flow._copy_scalars(self) return flow def _extend_2d(self, axis, delta, npoints): """ 2D (index space) extension. """ imax, jmax = self.real_shape if axis == 'i': new_shape = (imax + npoints, jmax) indx = imax if delta > 0 else npoints else: new_shape = (imax, jmax + npoints) indx = jmax if delta > 0 else npoints flow = FlowSolution() for arr in self._arrays: new_arr = numpy.zeros(new_shape) if axis == 'i': if delta > 0: new_arr[0:indx, :] = arr for i in range(npoints): new_arr[indx+i, :] = arr[-1, :] else: new_arr[indx:, :] = arr for i in range(npoints): new_arr[i, :] = arr[0, :] else: if delta > 0: new_arr[:, 0:indx] = arr for j in range(npoints): new_arr[:, indx+j] = arr[:, -1] else: new_arr[:, indx:] = arr for j in range(npoints): new_arr[:, j] = arr[:, 0] flow.add_array(self.name_of_obj(arr), new_arr) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extend(axis, delta, npoints)) flow.grid_location = self.grid_location flow.ghosts = copy.copy(self._ghosts) flow._copy_scalars(self) return flow def _extend_1d(self, delta, npoints): """ 1D (index space) extension. 
""" imax, = self.real_shape new_shape = (imax + npoints,) indx = imax if delta > 0 else npoints flow = FlowSolution() for arr in self._arrays: new_arr = numpy.zeros(new_shape) if delta > 0: new_arr[0:indx] = arr for i in range(npoints): new_arr[indx+i] = arr[-1] else: new_arr[indx:] = arr for i in range(npoints): new_arr[i] = arr[0] flow.add_array(self.name_of_obj(arr), new_arr) for vector in self._vectors: flow.add_vector(self.name_of_obj(vector), vector.extend('i', delta, npoints)) flow.grid_location = self.grid_location flow.ghosts = copy.copy(self._ghosts) flow._copy_scalars(self) return flow def name_of_obj(self, obj): """ Return name of object or None if not found. """ for name, value in self.__dict__.items(): if value is obj: return name return None def flip_z(self): """ Convert to other-handed coordinate system. """ for vector in self._vectors: vector.flip_z() def make_cartesian(self, grid, axis='z'): """ Convert to Cartesian coordinate system. grid: :class:`GridCoordinates` Must be in cylindrical form. axis: string Specifies which is the cylinder axis ('z' or 'x'). """ for vector in self._vectors: vector.make_cartesian(grid, axis) def make_cylindrical(self, grid, axis='z'): """ Convert to cylindrical coordinate system. grid: :class:`GridCoordinates` Must be in cylindrical form. axis: string Specifies which is the cylinder axis ('z' or 'x'). """ for vector in self._vectors: vector.make_cylindrical(grid, axis) def rotate_about_x(self, deg): """ Rotate about the X axis. deg: float (degrees) Amount of rotation. """ for vector in self._vectors: vector.rotate_about_x(deg) def rotate_about_y(self, deg): """ Rotate about the Y axis. deg: float (degrees) Amount of rotation. """ for vector in self._vectors: vector.rotate_about_y(deg) def rotate_about_z(self, deg): """ Rotate about the Z. deg: float (degrees) Amount of rotation. """ for vector in self._vectors: vector.rotate_about_z(deg) def promote(self): """ Promote from N-dimensional to N+1 dimensional index space. """ shape = self.real_shape if len(shape) > 2: raise RuntimeError('FlowSolution is 3D') elif len(shape) > 1: imax, jmax = shape for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((imax, jmax, 1)) new_arr[:, :, 0] = arr[:, :] setattr(self, name, new_arr) self._arrays[i] = new_arr elif len(shape) > 0: imax = shape[0] for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((imax, 1)) new_arr[:, 0] = arr[:] setattr(self, name, new_arr) self._arrays[i] = new_arr else: raise RuntimeError('FlowSolution is empty!') for vector in self._vectors: vector.promote() def demote(self): """ Demote from N-dimensional to N-1 dimensional index space. 
""" shape = self.real_shape ghosts = self._ghosts if len(shape) > 2: imax, jmax, kmax = shape imx = imax - (ghosts[0] + ghosts[1]) jmx = jmax - (ghosts[2] + ghosts[3]) kmx = kmax - (ghosts[4] + ghosts[5]) if imx == 1: for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((jmax, kmax)) new_arr[:, :] = arr[ghosts[0], :, :] setattr(self, name, new_arr) self._arrays[i] = new_arr self._ghosts = (ghosts[2], ghosts[3], ghosts[4], ghosts[5], 0, 0) elif jmx == 1: for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((imax, kmax)) new_arr[:, :] = arr[:, ghosts[1], :] setattr(self, name, new_arr) self._arrays[i] = new_arr self._ghosts = (ghosts[0], ghosts[1], ghosts[4], ghosts[5], 0, 0) elif kmx == 1: for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((imax, jmax)) new_arr[:, :] = arr[:, :, ghosts[2]] setattr(self, name, new_arr) self._arrays[i] = new_arr self._ghosts = (ghosts[0], ghosts[1], ghosts[2], ghosts[3], 0, 0) else: raise RuntimeError('No i, j, or k plane to collapse') elif len(shape) > 1: imax, jmax = shape imx = imax - (ghosts[0] + ghosts[1]) jmx = jmax - (ghosts[2] + ghosts[3]) if imx == 1: for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((jmax,)) new_arr[:] = arr[ghosts[0], :] setattr(self, name, new_arr) self._arrays[i] = new_arr self._ghosts = (ghosts[2], ghosts[3], 0, 0, 0, 0) elif jmx == 1: for i, arr in enumerate(self._arrays): name = self.name_of_obj(arr) new_arr = numpy.zeros((imax,)) new_arr[:] = arr[:, ghosts[1]] setattr(self, name, new_arr) self._arrays[i] = new_arr self._ghosts = (ghosts[0], ghosts[1], 0, 0, 0, 0) else: raise RuntimeError('No i or j plane to collapse') elif len(shape) > 0: raise RuntimeError('FlowSolution is 1D') else: raise RuntimeError('FlowSolution is empty!') for vector in self._vectors: vector.demote()
gpl-2.0
-6,503,133,839,455,290,000
34.457917
86
0.494086
false
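A minimal usage sketch of the FlowSolution class above: add a scalar array, inspect the shape properties, and extract a sub-block. The import path simply mirrors the file path shown above and assumes the legacy (Python 2.7-era) openmdao.lib package is installed; Vector data and ghost planes keep their defaults.

import numpy
from openmdao.lib.datatypes.domain.flow import FlowSolution

flow = FlowSolution()
density = flow.add_array('density', numpy.arange(24.0).reshape((4, 3, 2)))

print(flow.shape)        # (4, 3, 2) -- no ghost planes by default
print(flow.real_shape)   # (4, 3, 2)

# Extract a 2x2x2 sub-block; the result is a new FlowSolution whose
# 'density' attribute holds the corresponding slice.
sub = flow.extract(0, 1, 0, 1, 0, 1)
print(sub.density.shape)  # (2, 2, 2)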
jayshonzs/UFLDL
SoftmaxRegression/train.py
1
1862
__author__ = 'xiajie'

import numpy as np
from softMaxCost import J, vec2mat
from scipy.optimize import fmin_l_bfgs_b
from MNISThelper.loader import load_train_imgs, load_train_labels
from SoftmaxRegression.prediction import predict
from computeNumericalGradient import compute_numerical_gradient


def initialize_theta(input_size, num_classes):
    return np.random.normal(0, 1., input_size*num_classes)*0.005


def check_numerical_gradient(theta, X, Y, num_classes, lbda=1e-4):
    args = (X, Y, lbda, num_classes)
    grad = J(theta, args)[1]
    numeric_grad = compute_numerical_gradient(J, theta, args)
    for i, g in enumerate(grad):
        print g, numeric_grad[i]


def train(init_theta, X, Y, num_classes, lbda=1e-4):
    try:
        theta, cost, info = fmin_l_bfgs_b(J, init_theta, args=((X, Y, lbda, num_classes),), approx_grad=False, maxiter=400)
    except Exception as e:
        print e
        # Re-raise: without this, the prints below would fail with a NameError
        # because theta/cost/info were never assigned.
        raise
    print theta
    print cost
    print info
    return theta

if __name__ == '__main__':
    inputSize = 28*28
    numClasses = 10
    lbda = 1e-4
    init_theta = initialize_theta(inputSize, numClasses)
    X = load_train_imgs(u'../MNISThelper/train-images.idx3-ubyte')
    Y = load_train_labels(u'../MNISThelper/train-labels.idx1-ubyte')
    print X.shape, Y.shape, init_theta.shape

    # numerical gradient check
    '''
    cX = X[:20, :10]
    cY = Y[:10]
    check_numerical_gradient(init_theta[:200], cX, cY, numClasses, lbda=lbda)
    '''

    theta = train(init_theta, X, Y, numClasses, lbda)
    mat = vec2mat(theta, numClasses)

    tX = load_train_imgs('../MNISThelper/t10k-images.idx3-ubyte')
    tY = load_train_labels('../MNISThelper/t10k-labels.idx1-ubyte')
    pY = predict(mat, tX)
    print tY[:20]
    print pY[:20]
    e = 0.
    for i in range(len(tY)):
        if pY[i] != tY[i]:
            e += 1
    print 'error rate', e/len(tY)
mit
-2,851,466,865,995,785,000
30.559322
123
0.648228
false
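train() above relies on scipy's fmin_l_bfgs_b with approx_grad=False, which expects the objective to return a (cost, gradient) pair. A self-contained illustration of that interface follows, with a simple quadratic standing in for the softmax cost J (minimum at (1, 2)).

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def cost_and_grad(theta):
    # Quadratic bowl centred at (1, 2): cost and its exact gradient.
    target = np.array([1.0, 2.0])
    diff = theta - target
    return 0.5 * np.dot(diff, diff), diff

theta0 = np.zeros(2)
theta, cost, info = fmin_l_bfgs_b(cost_and_grad, theta0, approx_grad=False,
                                  maxiter=400)
print(theta)   # approximately [1. 2.]
print(cost)    # approximately 0.0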
kn45/LTR-DNN
train.py
1
6330
#!/usr/bin/env python import dataproc import itertools import numpy as np import random import time import sys import tensorflow as tf from collections import defaultdict from ltrdnn import LTRDNN flags = tf.flags FLAGS = flags.FLAGS # model related: flags.DEFINE_integer('vocab_size', 1532783, 'vocabulary size') flags.DEFINE_integer('emb_dim', 256, 'embedding dimension') flags.DEFINE_integer('repr_dim', 256, 'sentence representing dimension') flags.DEFINE_string('combiner', 'sum', 'how to combine words in a sentence') # training related: flags.DEFINE_string('train_file', '', 'training data file') flags.DEFINE_string('valid_file', '', 'validation data file') flags.DEFINE_string('test_file', '', 'testing data file') flags.DEFINE_integer('train_bs', 128, 'train batch size') flags.DEFINE_integer('max_epoch', 1, 'max epoch') flags.DEFINE_integer('max_iter', 1000, 'max iteration') flags.DEFINE_float('eps', 1.0, 'zero-loss threshold epsilon in hinge loss') flags.DEFINE_integer('eval_steps', 20, 'every how many steps to evaluate') flags.DEFINE_string('model_ckpt_file', './model_ckpt/model.ckpt', 'model file') flags.DEFINE_string('embedding_file', './words_embedding', 'embedding file') # log related: flags.DEFINE_string('log_path', './log', 'log path') def load_embedding(embf, vocab_size, emb_size): """load pretrained embedding mat from file. """ # create a random word_embedding list. # emb = [np.random.uniform(-0.2, 0.2, emb_size) for i in range(vocab_size)] emb = np.zeros((vocab_size, emb_size)) with open(embf) as f: for nl, line in enumerate(f): flds = line.rstrip(' \n').split(' ') word_idx = int(flds[0]) vec = map(float, flds[1:]) emb[word_idx] = np.array(vec) return np.array(emb) def inp_fn(data): """Extract training data. @data : line in training file. @return : training data in required format """ def _random_choose(l): return random.sample(l, 1)[0] sp_feed = defaultdict(list) batch_size = len(data) seq_len = 0 for i, inst in enumerate(data): flds = inst.split('\t') query = map(int, flds[0].split(' ')) pos_title_num = int(flds[1]) pos_titles = flds[2:2+pos_title_num] neg_title_num = int(flds[2+pos_title_num]) neg_titles = flds[2+pos_title_num+1:] pos_title = _random_choose(pos_titles) pos_title = map(int, pos_title.split(' ')) neg_title = _random_choose(neg_titles) neg_title = map(int, neg_title.split(' ')) seq_len = max(seq_len, len(query), len(pos_title), len(neg_title)) for j, word_id in enumerate(query): sp_feed['qry_idx'].append([i, j]) sp_feed['qry_val'].append(word_id) for j, word_id in enumerate(pos_title): sp_feed['pos_idx'].append([i, j]) sp_feed['pos_val'].append(word_id) for j, word_id in enumerate(neg_title): sp_feed['neg_idx'].append([i, j]) sp_feed['neg_val'].append(word_id) return (sp_feed['qry_idx'], sp_feed['qry_val'], [batch_size, seq_len]), \ (sp_feed['pos_idx'], sp_feed['pos_val'], [batch_size, seq_len]), \ (sp_feed['neg_idx'], sp_feed['neg_val'], [batch_size, seq_len]) def eval_fn(inst): """Extract evaluating data. @inst : line in evaluating file. 
@return : evaluating data in required format """ def _max_len(lst): return max([len(x) for x in lst]) flds = inst.split('\t') qrys = flds[0:1] pos_num = int(flds[1]) poss = flds[2:2+pos_num] neg_num = int(flds[2+pos_num]) negs = flds[2+pos_num+1:] qrys = [map(int, x.split(' ')) for x in qrys] poss = [map(int, x.split(' ')) for x in poss] negs = [map(int, x.split(' ')) for x in negs] seq_len = max(_max_len(qrys), _max_len(poss), _max_len(negs)) batch_size = len(qrys) * len(poss) * len(negs) sp_feed = defaultdict(list) for i, (qry, pos, neg) in enumerate(itertools.product(qrys, poss, negs)): for j, word_id in enumerate(qry): sp_feed['qry_idx'].append([i, j]) sp_feed['qry_val'].append(word_id) for j, word_id in enumerate(pos): sp_feed['pos_idx'].append([i, j]) sp_feed['pos_val'].append(word_id) for j, word_id in enumerate(neg): sp_feed['neg_idx'].append([i, j]) sp_feed['neg_val'].append(word_id) return (sp_feed['qry_idx'], sp_feed['qry_val'], [batch_size, seq_len]), \ (sp_feed['pos_idx'], sp_feed['pos_val'], [batch_size, seq_len]), \ (sp_feed['neg_idx'], sp_feed['neg_val'], [batch_size, seq_len]) train_freader = dataproc.BatchReader(FLAGS.train_file, FLAGS.max_epoch) with open(FLAGS.valid_file) as f: valid_data = [x.rstrip('\n') for x in f.readlines()] valid_q, valid_pt, valid_nt = inp_fn(valid_data) mdl = LTRDNN( vocab_size=FLAGS.vocab_size, emb_dim=FLAGS.emb_dim, repr_dim=FLAGS.repr_dim, combiner=FLAGS.combiner, eps=FLAGS.eps) sess = tf.Session() file_writer = tf.summary.FileWriter(FLAGS.log_path, sess.graph) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) print 'loading pretrained embedding from file' pretrained_emb = load_embedding( FLAGS.embedding_file, FLAGS.vocab_size, FLAGS.emb_dim) mdl.assign_embedding(sess, pretrained_emb) metrics = ['loss'] print 'train begin...' for niter in xrange(FLAGS.max_iter): batch_data = train_freader.get_batch(FLAGS.train_bs) if not batch_data: break train_q, train_pt, train_nt = inp_fn(batch_data) mdl.train_step(sess, train_q, train_pt, train_nt) if niter % FLAGS.eval_steps != 0: continue train_eval = mdl.eval_step(sess, train_q, train_pt, train_nt, metrics) valid_eval = mdl.eval_step(sess, valid_q, valid_pt, valid_nt, metrics) ntime = time.strftime('%Y%m%d_%H:%M:%S', time.localtime(time.time())) print ntime, niter, \ 'train_loss:', train_eval, 'valid_loss:', valid_eval save_path = mdl.saver.save( sess, FLAGS.model_ckpt_file, global_step=mdl.global_step, write_meta_graph=False) print 'model saved:', save_path with open(FLAGS.test_file) as feval: acc = mdl.pairwise_accuracy(sess, feval, eval_fn) print 'pairwise accuracy:', acc sess.close()
mit
8,209,930,828,133,903,000
36.455621
79
0.626382
false
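inp_fn() above encodes each sentence as an (indices, values, dense_shape) triple for a TensorFlow sparse input. A toy sketch of that format with two made-up sentences of word ids, assembled into a tf.SparseTensor:

import tensorflow as tf

batch = [[11, 42, 7],   # sentence 0
         [99, 3]]       # sentence 1

indices, values = [], []
for row, sentence in enumerate(batch):
    for pos, word_id in enumerate(sentence):
        indices.append([row, pos])   # [row_in_batch, position_in_sentence]
        values.append(word_id)       # word id at that position
dense_shape = [len(batch), max(len(s) for s in batch)]

sparse = tf.SparseTensor(indices=indices, values=values,
                         dense_shape=dense_shape)
print(sparse)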
Usherwood/usherwood_ds
usherwood_ds/twitter_influencers.py
1
15610
#!/usr/bin/env python """Class for finding Twitter influencers""" __author__ = "Peter J Usherwood" __python_version__ = "3.6" import pandas as pd import numpy as np import csv import json import os from scipy.stats import percentileofscore import progressbar from usherwood_ds.data_imports.twitter_import import create_twitter_user_df from usherwood_ds.data_imports.twitter_api.api_class import TwitterAPI def influencer_identification(handles, save_path='', TOP_X_CONNECTED=2000, api_credentials=None, inc_tiers=True, tiers=[1500, 5000, 20000, 100000], TOP_X_PER_TIER=-1): """ Run the analysis to find the top influential accounts on Twitter. This is the full influencer analysis, for a quicker insight run interests_identification. :param handles: List of Twitter handles :param save_path: path of where save the dataframes to :param TOP_X_CONNECTED: Int, take the top_x_connect influencers :param api_credentials: Dict, api credentials :param inc_tiers: Bool, divide rankings by number of followers :param tiers: List, ascending list of integers as the upper boundaries of follower numbers per tier, a final tier will be added for uses with more followers than your last divide :param TOP_X_PER_TIER: int, keep only top x per influence tier, -1 is all, good for power BI """ if api_credentials is None: with open(os.path.join(os.path.dirname(__file__), "data_imports/api_credentials.json"), 'r') as openfile: api_credentials = json.load(openfile) api = TwitterAPI(api_credentials=api_credentials) print('Fortifying target market') target_market, TM_SIZE = fortify_tm_with_previous_posts(handles=handles, save_path=save_path, api=api) print('Getting sphere of influence') influencers = get_sphere_of_influence(target_market=target_market, save_path=save_path, api=api) print('Fortifying sphere of influence and getting amplification') influencers = get_amplification_influencers(influencers=influencers, api=api, TM_SIZE=TM_SIZE, TOP_X_CONNECTED=TOP_X_CONNECTED, save_path=save_path, tiers=tiers, TOP_X_PER_TIER=TOP_X_PER_TIER) print('Calculating Engagement and overall influence') influencers = get_engagement_influencers(influencers=influencers, target_market=target_market, save_path=save_path, TOP_X_PER_TIER=TOP_X_PER_TIER) print('Done') return target_market, influencers def interests_identification(handles, save_path='', TOP_X_CONNECTED=2000, api_credentials=None, TOP_X_PER_TIER=-1): """ Run the analysis to find the top amplifying accounts on Twitter, good for identifying interests or quick influencer analysis. 
For full influencer analysis use the influencers_identification function as it calculates engagement scores :param handles: List of Twitter handles :param save_path: path of where save the dataframes to :param TOP_X_CONNECTED: Int, take the top_x_connect influencers :param api_credentials: Dict, api credentials :param TOP_X_PER_TIER: int, keep only top x per influence tier, -1 is all, good for power BI """ if api_credentials is None: with open(os.path.join(os.path.dirname(__file__), "../api_credentials.json"), 'r') as openfile: api_credentials = json.load(openfile) api = TwitterAPI(api_credentials=api_credentials) print('Fortifying target market') target_market, TM_SIZE = fortify_tm_without_engamements(handles=handles, save_path=save_path, api=api) print('Getting sphere of influence') influencers = get_sphere_of_influence(target_market, save_path=save_path, api=api) print('Fortifying sphere of influence and getting amplification') influencers = get_amplification_influencers(influencers=influencers, api=api, TM_SIZE=TM_SIZE, TOP_X_CONNECTED=TOP_X_CONNECTED, save_path=save_path, TOP_X_PER_TIER=TOP_X_PER_TIER) print('Done') return target_market, influencers def fortify_tm_without_engamements(handles, api, save_path=''): """ fortify the tm with user info without engagement measures :param handles: List of Twitter handles :param api: TwitterAPI instance :param save_path: path of where save the dataframes to :return: target_market - pandas df of fortified Twitter users and their engagements """ users = api.fortify_twitter_users_batch(usernames=handles) TM_SIZE = len(users) print(TM_SIZE) target_market_arr = [] for user in users: target_market_arr += [api.parse_user_to_twitter_user(user)] target_market = create_twitter_user_df(target_market_arr) target_market.to_csv(save_path+'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False) return target_market, TM_SIZE def fortify_tm_with_previous_posts(handles, api, max_tweets=100, save_path=''): """ fortify the tm with user info and past max_tweets for engagement measures :param handles: List of Twitter handles :param api: TwitterAPI instance :param max_tweets: Int, this is the number of tweets the engagement will be based on :param save_path: path of where save the dataframes to :return: target_market - pandas df of fortified Twitter users and their engagements """ engagements = [] users = [] for handle in handles: try: tweets, user = api.get_user_tweets(username=handle, max_number=max_tweets) print(user['screen_name']) users += [user] at_mentions = [] reply_to = [] retweets = [] for tweet in tweets: try: user_mention_blocks = tweet['entities']['user_mentions'] for block in user_mention_blocks: at_mentions += [block['id']] except Exception as e: pass try: if tweet['in_reply_to_user_id']: reply_to += [tweet['in_reply_to_user_id']] except Exception as e: pass try: retweets += [tweet['retweeted_status']['user']['id']] except Exception as e: pass engagements.append(at_mentions + reply_to + retweets) except Exception as e: print(e) target_market_arr = [] for user in users: target_market_arr += [api.parse_user_to_twitter_user(user)] target_market = create_twitter_user_df(target_market_arr) target_market['Engagements in Past 100 Tweets'] = engagements target_market = target_market[target_market['Engagements in Past 100 Tweets'].astype(str) != '[]'] TM_SIZE = len(target_market) target_market.to_csv(save_path+'TM.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False) return target_market, TM_SIZE def get_sphere_of_influence(target_market, api, 
save_path=''): """ Get the people the target market are following and rank by the most connected :param target_market: :param api: TwitterAPI instance :param save_path: path of where save the dataframes to :return: partially populated influencers df """ sphere = [] with progressbar.ProgressBar(max_value=len(target_market['Twitter Author ID'])) as bar: for i, user_id in enumerate(target_market['Twitter Author ID'].values.tolist()): friends = api.get_user_friends_ids(user_id=user_id, max_number=5000) if friends is None: pass else: sphere += friends bar.update(i) influencers = pd.DataFrame(pd.Series(sphere).value_counts()).reset_index().rename( columns={'index': 'Twitter Author ID', 0: 'TM Amplification'}) influencers.to_csv(save_path+'Influencers.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False) return influencers def get_amplification_influencers(TM_SIZE, api, TOP_X_CONNECTED=2000, save_path='', influencers=None, load_from_disk=False, load_path='', tiers=[1500,5000,20000,100000], TOP_X_PER_TIER=-1): """ Fortify the influencers df for the top_x_connected influencers :param influencers: influencers df output from get_sphere_of_influence :param TM_SIZE: Int, the size of the target market :param TOP_X_CONNECTED: Int, take the top_x_connect influencers :param save_path: path of where save the dataframes to :param load_from_disk: Bool, load previously ran influencer sdata from disk :param load_path: Str, path to the saved data if it is to be loaded, files must be named TM.csv and Influencers.csv :param tiers: List, ascending list of integers as the upper boundaries of follower numbers per tier, a final tier will be added for uses with more followers than your last divide :param TOP_X_PER_TIER: int, keep only top x per influence tier, -1 is all, good for power BI :return: partially populated influencers df """ if load_from_disk: influencers = pd.read_csv(load_path+'Influencers.csv') influencers = influencers[:TOP_X_CONNECTED] influencers_jsons = api.fortify_twitter_users_batch(user_ids=influencers['Twitter Author ID'].values.tolist()) influencers_arr = [] for user in influencers_jsons: influencers_arr += [api.parse_user_to_twitter_user(user)] influencers_fort = create_twitter_user_df(influencers_arr) influencers_fort['Twitter Author ID'] = influencers_fort['Twitter Author ID'].astype(np.int64) influencers = influencers_fort.merge(influencers, how='inner', on='Twitter Author ID') influencers['Amplification Index'] = influencers[['Follower Count', 'TM Amplification']].apply( lambda x: x[1] * (TM_SIZE / x[0]), axis=1) influencers.sort_values(by='Amplification Index', inplace=True, ascending=False) influencers['Tier'] = 0 tiers = tiers.copy() tiers = [0]+tiers+[9999999999] influencers = apply_tiers(influencers, tiers) influencers = run_indexing(influencers=influencers, base_column_name='Amplification Index', TOP_X_PER_TIER=-1, reindexed_column_name='Amplification Index') influencers = run_indexing(influencers=influencers, base_column_name='Amplification Index', TOP_X_PER_TIER=TOP_X_PER_TIER, reindexed_column_name='Amplification Index PowerBI') influencers.reset_index(drop=True, inplace=True) influencers['Channel'] = 'Twitter' influencers.to_csv(save_path+'Influencers.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False) return influencers def get_engagement_influencers(target_market=None, influencers=None, save_path='', load_from_disk=False, load_path='', TOP_X_PER_TIER=-1): """ Fortify influencers df with amplification :param influencers: influencers df :param target_market: target 
market df :param save_path: path of where save the dataframes to :param load_from_disk: Bool, load previously ran influencer sdata from disk :param load_path: Str, path to the saved data if it is to be loaded, files must be named TM.csv and influencers.csv :param TOP_X_PER_TIER: int, keep only top x per influence tier, -1 is all, good for power BI :return: influencers df fortified with tm engagement and overall influence """ if load_from_disk: influencers = pd.read_csv(load_path+'influencers.csv') target_market = pd.read_csv(load_path+'TM.csv') if isinstance(target_market['Engagements in Past 100 Tweets'].iloc[0], str): target_market['Engagements in Past 100 Tweets'] = target_market['Engagements in Past 100 Tweets']\ .apply(lambda e: eval(e)) all_tm_engagements = [item for sublist in target_market['Engagements in Past 100 Tweets'].values.tolist() for item in sublist] def get_tm_eng_count(influencer_id, all_tm_engagements): eng_counts = len([1 for ix in all_tm_engagements if ix == influencer_id]) return eng_counts influencers['TM Engagement'] = influencers['Twitter Author ID'].apply( lambda x: get_tm_eng_count(x, all_tm_engagements)) influencers['Engagement Index'] = influencers['TM Engagement']/influencers['TM Amplification'] influencers = run_indexing(influencers=influencers, base_column_name='Engagement Index', TOP_X_PER_TIER=-1, reindexed_column_name='Engagement Index') influencers = run_indexing(influencers=influencers, base_column_name='Engagement Index', TOP_X_PER_TIER=TOP_X_PER_TIER, reindexed_column_name='Engagement Index PowerBI') influencers['Influence Index'] = (influencers['Engagement Index']+influencers['Amplification Index'])/2.0 influencers['Influence Index PowerBI'] = (influencers['Engagement Index PowerBI']\ + influencers['Amplification Index PowerBI']) / 2.0 influencers.to_csv(save_path+'Influencers.csv', encoding='utf-8', quoting=csv.QUOTE_ALL, index=False) return influencers def apply_tiers(influencers, tiers): for tier_ix in range(len(tiers) - 1): sub = influencers[(influencers['Follower Count'] >= tiers[tier_ix]) & (influencers['Follower Count'] < tiers[tier_ix + 1])] influencers.ix[sub.index, 'Tier'] = 'Tier ' + str(tier_ix + 1) return influencers def run_indexing(influencers, TOP_X_PER_TIER=-1, base_column_name='Amplification Index', reindexed_column_name='Amplification Index'): if reindexed_column_name not in influencers.columns: influencers[reindexed_column_name] = 0 for tier_ix in influencers['Tier'].value_counts().index: sub = influencers[(influencers['Tier'] == tier_ix)] arr = sorted(sub[base_column_name].values) if TOP_X_PER_TIER >= 0: arr = arr[-TOP_X_PER_TIER:] influencers.ix[sub.index, reindexed_column_name] = sub[base_column_name]. \ apply(lambda e: percentileofscore(arr, e)).values return influencers
bsd-2-clause
-2,693,658,205,263,793,700
41.767123
119
0.608969
false
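A small worked example of the two scores computed above, with made-up numbers: the Amplification Index rescales "followed by N target-market members" by reach (TM_SIZE / follower count), and run_indexing() then maps raw scores to within-tier percentiles via scipy's percentileofscore.

import pandas as pd
from scipy.stats import percentileofscore

TM_SIZE = 50  # illustrative target-market size
influencers = pd.DataFrame({
    'TM Amplification': [40, 10, 25],          # TM members following the account
    'Follower Count': [100000, 2000, 30000],
})
influencers['Amplification Index'] = (
    influencers['TM Amplification'] * (TM_SIZE / influencers['Follower Count']))

# Percentile-of-score indexing, as in run_indexing().
scores = sorted(influencers['Amplification Index'])
influencers['Percentile'] = influencers['Amplification Index'].apply(
    lambda s: percentileofscore(scores, s))
print(influencers)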
HanKruiger/tsnetwork
src/tsnetwork.py
1
17075
#!/bin/python3 import shutil import graph_tool.all as gt import numpy as np import modules.distance_matrix as distance_matrix import modules.graph_io as graph_io import modules.layout_io as layout_io import modules.thesne as thesne import modules.user_input as usr_input import modules.animation as animations from modules.tsn_config import TsnConfig from modules.sfdp_layout import sfdp_placement def main(): import sys import os.path import glob import itertools from argparse import ArgumentParser parser = ArgumentParser(description='Read a graph, and produce a layout with t-SNE.') # Input parser.add_argument('graphs', nargs='+', help='(List of) input graph(s). Or a folder with graphs.') # Output parser.add_argument('-o', default='./output', help='Folder to write output to. Default: ./output') parser.add_argument('--save_every', type=int, help='Save a jpg snapshot ever x epochs.') parser.add_argument('--render_video', action='store_true', help='Render a video of the layout evolution. Needs ImageMagick and ffmpeg.') parser.add_argument('--retain_snaps', action='store_true', help='Retain the snapshots. This argument is ignored if no video is rendered.') parser.add_argument('--save_layout_data', action='store_true', help='Save all layout coordinates in a .pickle file and a .txt file.') parser.add_argument('--opacity', type=float, default=0.3, help='Edge opacity.') # Manipulations to graph parser.add_argument('--strip_graph', action='store_true', help='Retain only the largest connected component in the graph.') parser.add_argument('--rnd_seed', '-r', type=int, nargs='+', default=[None], help='Seed for random state. (Default: Random seed)') parser.add_argument('--pre_sfdp', action='store_true', help='If this flag is given, the vertices will be pre-initialized with SFDP.') parser.add_argument('--only_sfdp', action='store_true', help='If this flag is given, only SFDP will be done.') parser.add_argument('--accept_all_sfdp', action='store_true', help='If this flag is given, no confirmation is asked for the SFDP layouts.') parser.add_argument('--remove_rnd_edges', nargs='+', type=float, default=[0], help='Mutate the graph by removing random edges. If this is used without a random seed, a random random seed will be generated. The value given to this argument is the fraction of edges that will be removed.') # Hyperparameters parser.add_argument('--n_epochs', '-e', nargs='+', type=int, default=[1000], help='One or more numbers of t-SNE epochs.') parser.add_argument('--lr_init', nargs='+', type=float, default=[80], help='One or more initial learning rates.') parser.add_argument('--lr_final', nargs='+', type=float, default=[None], help='One or more final learning rates. 
Default: Same as lr_init.') parser.add_argument('--lr_switch', nargs='+', type=int, default=[None], help='One or more learning rate switch-points.') parser.add_argument('--momentum_init', nargs='+', type=float, default=[0.5], help='One or more initial momenta.') parser.add_argument('--momentum_final', nargs='+', type=float, default=[0.5], help='One or more initial momenta.') parser.add_argument('--momentum_switch', nargs='+', type=int, default=[None], help='One or more momentum switch-points.') # Distance metric parameters parser.add_argument('--distance_metric', '-d', choices=['shortest_path', 'spdm', 'modified_adjacency', 'mam'], default='spdm', help='The distance metric that is used for the pairwise distances.') parser.add_argument('-k', nargs='+', type=float, default=[1], help='Exponent for transfer function.') # Cost function parameters # Kullback-Leibler parser.add_argument('--perplexity', '-p', nargs='+', type=float, default=[80], help='One or more perplexities.') parser.add_argument('--l_kl_init', nargs='+', type=float, default=[1], help='One or more KL factors.') parser.add_argument('--l_kl_final', nargs='+', type=float, default=[1], help='One or more KL factors.') parser.add_argument('--l_kl_switch', nargs='+', type=int, default=[None], help='One or more KL switch-points') # Edge contraction parser.add_argument('--l_e_init', nargs='+', type=float, default=[0], help='One or more edge contraction factors.') parser.add_argument('--l_e_final', nargs='+', type=float, default=[0], help='One or more edge contraction factors.') parser.add_argument('--l_e_switch', nargs='+', type=int, default=[None], help='One or more edge contraction switch-points') # Compression parser.add_argument('--l_c_init', nargs='+', type=float, default=[1.2], help='One or more compression factors.') parser.add_argument('--l_c_final', nargs='+', type=float, default=[0], help='One or more compression factors.') parser.add_argument('--l_c_switch', nargs='+', type=int, default=[None], help='One or more compression switch-points') # Repulsion parser.add_argument('--l_r_init', nargs='+', type=float, default=[0], help='One or more repulsion factors.') parser.add_argument('--l_r_final', nargs='+', type=float, default=[0.5], help='One or more repulsion factors.') parser.add_argument('--l_r_switch', nargs='+', type=int, default=[None], help='One or more repulsion switch-points') parser.add_argument('--r_eps', nargs='+', type=float, default=[0.2], help='Additional term in denominator to prevent near-singularities.') args = parser.parse_args() # Retrieve a list of all files in the directory, if args.graphs[0] is a directory. if len(args.graphs) == 1 and os.path.isdir(args.graphs[0]): args.graphs = glob.glob(args.graphs[0] + '/*') # Check graph input for g_file in args.graphs: if not os.path.isfile(g_file): raise FileNotFoundError(g_file + ' is not a file.') # Generate random random seed if none is given. if args.rnd_seed == [None]: args.rnd_seed = [np.random.randint(1e8)] # Ignore retain_snaps argument if no video is rendered. if not args.render_video: args.retain_snaps = True # Get names of the graphs (by splitting of path and extension) names = [os.path.split(os.path.splitext(file)[0])[1] for file in args.graphs] # Determine output folders. One is created in the specified output folder # for every graph that is supplied. 
output_folders = [args.o + '/' + name for name in names] # Check (and possibly create) output folders for folder in [args.o] + output_folders: if not os.path.exists(folder): os.makedirs(folder) # At least everything is fine for now. there_were_exceptions = False # Loop over all graphs (and their respective output folders) for g_file, g_name, output_folder in zip(args.graphs, names, output_folders): # Load the graph g = graph_io.load_graph(g_file) print('[tsnetwork] Loaded graph {0} (|V| = {1}, |E| = {2}) into memory.'.format(g_name, g.num_vertices(), g.num_edges())) # Add graph name as propery in the internal representation g.graph_properties['name'] = g.new_graph_property('string', g_name) # Usually this loop has just one iteration, with only 0 as the value # for rmv_edge_frac (that is, no edges are removed). for rmv_edge_frac in args.remove_rnd_edges: print('[tsnetwork] Original graph: (|V|, |E|) = ({0}, {1}).'.format(g.num_vertices(), g.num_edges())) # Create a temporary copy of the graph that will be manipulated. gv = gt.GraphView(g) # Remove rmv_edge_frac of the graphs edges from gv. gv.clear_filters() gv.reindex_edges() edge_list = list(gv.edges()) not_here_ep = gv.new_edge_property('bool', val=True) n_remove_edges = int(rmv_edge_frac * gv.num_edges()) for e in np.random.randint(0, gv.num_edges(), n_remove_edges): not_here_ep[edge_list[e]] = False gv.set_edge_filter(not_here_ep) if n_remove_edges > 0: print('[tsnetwork] Removed {2} random edges: (|V|, |E|) = ({0}, {1}).'.format(gv.num_vertices(), gv.num_edges(), n_remove_edges)) # Filter the graph s.t. only the largest connected component # remains. if args.strip_graph: largest_connected_component = gt.label_largest_component(gv) gv.set_vertex_filter(largest_connected_component) gv.purge_vertices() print('[tsnetwork] Filtered largest component: (|V|, |E|) = ({0}, {1}).'.format(gv.num_vertices(), gv.num_edges())) if args.pre_sfdp or args.only_sfdp: # Perform a SFDP layout (either as the only layout or as a # starting point for t-SNE.) Y_init, _ = sfdp_placement(gv, output_folder, ask_for_acceptance=not args.accept_all_sfdp, opacity=args.opacity) if args.only_sfdp: continue else: # Random positions will be generated Y_init = None # Compute distance matrix of this graph with the specified metric X = distance_matrix.get_distance_matrix(gv, args.distance_metric) # Retrieve the adjacency matrix of the graph Adj_sparse = gt.adjacency(gv) Adj = np.zeros(Adj_sparse.shape, dtype='float32') for i, j in zip(*Adj_sparse.nonzero()): Adj[i, j] = Adj_sparse[i, j] # Make list of tsnetwork configuration objects. These are objects # that represent a configuration for a t-SNE layout. 
            tsn_configs = []
            for perplexity, n_epochs, initial_lr, final_lr, lr_switch, initial_momentum,\
                final_momentum, momentum_switch,\
                initial_l_kl, final_l_kl, l_kl_switch,\
                initial_l_e, final_l_e, l_e_switch,\
                initial_l_c, final_l_c, l_c_switch,\
                initial_l_r, final_l_r, l_r_switch,\
                r_eps, k, rnd_seed in itertools.product(
                    args.perplexity, args.n_epochs, args.lr_init, args.lr_final,
                    args.lr_switch, args.momentum_init, args.momentum_final,
                    args.momentum_switch, args.l_kl_init, args.l_kl_final,
                    args.l_kl_switch, args.l_e_init, args.l_e_final,
                    args.l_e_switch, args.l_c_init, args.l_c_final,
                    args.l_c_switch, args.l_r_init, args.l_r_final,
                    args.l_r_switch, args.r_eps, args.k, args.rnd_seed):

                # Use 50% for the switching points if no argument is given
                if lr_switch is None:
                    lr_switch = int(n_epochs * 0.5)
                if momentum_switch is None:
                    momentum_switch = int(n_epochs * 0.5)
                if l_kl_switch is None:
                    l_kl_switch = int(n_epochs * 0.5)
                if l_e_switch is None:
                    l_e_switch = int(n_epochs * 0.5)
                if l_c_switch is None:
                    l_c_switch = int(n_epochs * 0.5)
                if l_r_switch is None:
                    l_r_switch = int(n_epochs * 0.5)

                if final_lr is None:
                    final_lr = initial_lr

                cfg = TsnConfig(
                    perplexity=perplexity, n_epochs=n_epochs,
                    initial_lr=initial_lr, final_lr=final_lr, lr_switch=lr_switch,
                    initial_momentum=initial_momentum,
                    final_momentum=final_momentum,
                    momentum_switch=momentum_switch,
                    initial_l_kl=initial_l_kl, final_l_kl=final_l_kl,
                    l_kl_switch=l_kl_switch,
                    initial_l_e=initial_l_e, final_l_e=final_l_e,
                    l_e_switch=l_e_switch,
                    initial_l_c=initial_l_c, final_l_c=final_l_c,
                    l_c_switch=l_c_switch,
                    initial_l_r=initial_l_r, final_l_r=final_l_r,
                    l_r_switch=l_r_switch, r_eps=r_eps, k=k,
                    pre_sfdp=args.pre_sfdp, rmv_edge_frac=rmv_edge_frac,
                    rnd_seed=rnd_seed, distance_matrix=args.distance_metric
                )

                # Do not add the configurations that already have files matching
                # the description, unless the user confirms to overwrite.
                if any([file.startswith(cfg.get_description() + '.') for file in os.listdir(output_folder)]):
                    if not usr_input.confirm('[tsnetwork] ' + cfg.get_description() + ' files exist! Overwrite?'):
                        continue

                tsn_configs.append(cfg)

            # Loop over the t-SNE configurations for a single graph
            for cfg in tsn_configs:
                print('[tsnetwork] Processing: ' + cfg.get_description())

                # Path to the directory where the snapshots will be saved
                # (if --save_every is given).
                snaps_dir = output_folder + '/snaps_' + cfg.get_description()

                # Clean out existing snaps directory if it exists.
                if args.save_every is not None and os.path.exists(snaps_dir):
                    if usr_input.confirm('[tsnetwork] ' + snaps_dir + ' exists. Delete contents?'):
                        for file in os.listdir(snaps_dir):
                            file_path = os.path.join(snaps_dir, file)
                            try:
                                if os.path.isfile(file_path):
                                    os.unlink(file_path)
                                elif os.path.isdir(file_path):
                                    shutil.rmtree(file_path)
                            except Exception as e:
                                print(e)
                elif args.save_every is not None and not os.path.exists(snaps_dir):
                    # Make folder for snaps, if it is necessary and it doesn't
                    # exist yet.
                    os.makedirs(snaps_dir)

                # Apply the transfer function
                X_transfered = X ** cfg.k

                # Try to do the tsne layout.
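                # thesne.tsne is expected to return the layout coordinates Y and
                # the recorded cost values; if the optimization fails (the
                # NaNException / SigmaTooLowException cases handled below), only
                # this configuration is skipped and the remaining ones still run.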
try: Y, costs = thesne.tsne(X_transfered, random_state=cfg.rnd_seed, perplexity=cfg.perplexity, n_epochs=cfg.n_epochs, Y=Y_init, initial_lr=cfg.initial_lr, final_lr=cfg.final_lr, lr_switch=cfg.lr_switch, initial_momentum=cfg.initial_momentum, final_momentum=cfg.final_momentum, momentum_switch=cfg.momentum_switch, initial_l_kl=cfg.initial_l_kl, final_l_kl=cfg.final_l_kl, l_kl_switch=cfg.l_kl_switch, initial_l_e=cfg.initial_l_e, final_l_e=cfg.final_l_e, l_e_switch=cfg.l_e_switch, initial_l_c=cfg.initial_l_c, final_l_c=cfg.final_l_c, l_c_switch=cfg.l_c_switch, initial_l_r=cfg.initial_l_r, final_l_r=cfg.final_l_r, l_r_switch=cfg.l_r_switch, r_eps=cfg.r_eps, Adj=Adj, g=gv, snaps_output_folder=snaps_dir, save_every=args.save_every) except (thesne.NaNException, thesne.SigmaTooLowException) as e: there_were_exceptions = True print('[exception] {0}'.format(e)) # Also write exception to a file. with open(output_folder + '/exception_' + cfg.get_description() + '.out', 'w') as f: print('{0}'.format(e), file=f) f.close() print('[tsnetwork] Continuing with next TsnConfig.') continue # Render an animation of the snapshots if args.render_video: animations.save_animation(snaps_dir, cfg.get_description()) # Remove the directory with snapshots. if args.save_every is not None and not args.retain_snaps and os.path.exists(snaps_dir): print('[tsnetwork] Cleaning up snaps directory.') shutil.rmtree(snaps_dir) # Save the data (graph, vertex coordinates) if args.save_layout_data: layout_io.save_vna_layout(output_folder + '/layout_' + cfg.get_description() + '.vna', gv, Y) layout_io.save_layout_txt(output_folder + '/layout_edges_' + cfg.get_description() + '.txt', gv, Y) # Save final drawing of the layout layout_io.save_drawing(output_folder, gv, Y.T, cfg.get_description(), formats=['jpg', 'pdf'], edge_colors="rgb", draw_vertices=False, opacity=args.opacity) if there_were_exceptions: print('[tsnetwork] Done! However, be wary. There were exceptions.') else: print('[tsnetwork] Done!') if __name__ == '__main__': main()
mit
8,829,132,729,099,388,000
56.107023
291
0.586413
false
alexras/boomslang
BrokenBarH.py
1
1944
from matplotlib import pyplot from PlotInfo import PlotInfo class BrokenBarH(PlotInfo): """ A plot element that represents a collection of horizontal bars spanning a sequence of [xMin, xMax] ranges at a given y. """ def __init__(self, y=None, xMins = [], xWidths = [], yWidth=0.6, linewidth=1.0, color="black", edgeColor=None, **kwargs): super(BrokenBarH,self).__init__("broken barh", **kwargs) self.y = y """ The y coordinate for this collection of horizontal bars """ self.xMins = xMins """ A list of locations for the start xValues of the horizontal bars """ self.xWidths = xWidths """ A list of widths of the horizontal bars """ self.yWidth = yWidth """ The bar's width along the y axis """ self.linewidth = linewidth """ The width of the line that borders the bars """ self.color = color """ The bar's color. See :ref:`styling-colors` for valid colors. """ self.edgeColor = edgeColor """ The color of the line that borders the bars """ def draw(self, fig, axis, transform=None): # Draw only if we have a valid y value for the horizontal bars if self.y is None: return [[], []] xranges = [(self.xMins[i], self.xWidths[i]) for i in xrange(len(self.xMins))] yrange = (self.y - 0.5 * self.yWidth, self.yWidth) kwdict = self.getAttributes() kwdict["linewidth"] = self.linewidth kwdict["edgecolor"] = self.edgeColor kwdict["facecolors"] = self.color kwdict["label"] = self.label return [[axis.broken_barh(xranges, yrange, **kwdict)], [self.label]]
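

# A minimal usage sketch, assuming the usual boomslang workflow in which plot
# elements are constructed with their data and handed to a Plot object that
# calls draw() on them when rendering. The `Plot` import and its add()/save()
# methods are assumptions taken from the wider boomslang package, not from
# this file, so the sketch is kept as a comment:
#
#     from boomslang import Plot
#
#     bars = BrokenBarH(y=2, xMins=[0.0, 3.0, 7.5], xWidths=[2.0, 1.5, 2.5],
#                       yWidth=0.8, color="steelblue")
#     bars.label = "busy intervals"   # label attribute provided by PlotInfo
#     plot = Plot()
#     plot.add(bars)
#     plot.save("broken_barh_example.png")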
bsd-3-clause
-6,954,721,040,625,368,000
26.771429
85
0.527263
false
amccaugh/phidl
phidl/device_layout.py
1
99945
# -*- coding: utf-8 -*- #============================================================================== # Major TODO #============================================================================== # Add D.add_gdsii_path() to allow creation of GDSII paths # Add D.write_gds(max_points_per_polygon) #============================================================================== # Minor TODO #============================================================================== # Add Group.get_polygons() # Allow Boolean to use Groups # Add pp.delay_sine(distance = 10, length = 20, num_periods = 2) # add wire_basic to phidl.routing. also add endcap parameter # Allow connect(overlap) to be a tuple (0, 0.7) # Possibly replace gdspy bezier (font rendering) with # https://stackoverflow.com/a/12644499 #============================================================================== # Documentation TODO #============================================================================== # Tutorials # - Using Aliases # - Boolean operations # - Advanced and Misc (simplify) # Examples # - An electrical device with contact pads # - An optoelectronic device # - Waveguide + LED # - route_manhattan #============================================================================== # Imports #============================================================================== from __future__ import division # Otherwise integer division e.g. 20 / 7 = 2 from __future__ import print_function # Use print('hello') instead of print 'hello' from __future__ import absolute_import import gdspy from copy import deepcopy import numpy as np from numpy import sqrt, mod, pi, sin, cos from numpy.linalg import norm import warnings import hashlib from phidl.constants import _CSS3_NAMES_TO_HEX # Remove this once gdspy fully deprecates current_library import gdspy.library gdspy.library.use_current_library = False __version__ = '1.5.2' #============================================================================== # Useful transformation functions #============================================================================== def _rotate_points(points, angle = 45, center = (0, 0)): """ Rotates points around a centerpoint defined by ``center``. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- points : array-like[N][2] Coordinates of the element to be rotated. angle : int or float Angle to rotate the points. center : array-like[2] Centerpoint of rotation. Returns ------- A new set of points that are rotated around ``center``. """ if angle == 0: return points angle = angle * pi/180 ca = cos(angle) sa = sin(angle) sa = np.array((-sa, sa)) c0 = np.array(center) if np.asarray(points).ndim == 2: return (points - c0)*ca + (points - c0)[:,::-1]*sa + c0 if np.asarray(points).ndim == 1: return (points - c0)*ca + (points - c0)[::-1]*sa + c0 def _reflect_points(points, p1 = (0, 0), p2 = (1, 0)): """ Reflects points across the line formed by p1 and p2. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- points : array-like[N][2] Coordinates of the element to be reflected. p1 : array-like[2] Coordinates of the start of the reflecting line. p2 : array-like[2] Coordinates of the end of the reflecting line. Returns ------- A new set of points that are reflected across ``p1`` and ``p2``. 
""" # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line points = np.array(points); p1 = np.array(p1); p2 = np.array(p2) if np.asarray(points).ndim == 1: return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points if np.asarray(points).ndim == 2: return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points]) def _is_iterable(items): """ Checks if the passed variable is iterable. Parameters ---------- items : any Item to check for iterability. """ return isinstance(items, (list, tuple, set, np.ndarray)) def _parse_coordinate(c): """ Translates various inputs (lists, tuples, Ports) to an (x,y) coordinate. Parameters ---------- c : array-like[N] or Port Input to translate into a coordinate. Returns ------- c : array-like[2] Parsed coordinate. """ if isinstance(c, Port): return c.midpoint elif np.array(c).size == 2: return c else: raise ValueError('[PHIDL] Could not parse coordinate, input should be array-like (e.g. [1.5,2.3] or a Port') def _parse_move(origin, destination, axis): """ Translates various input coordinates to changes in position in the x- and y-directions. Parameters ---------- origin : array-like[2] of int or float, Port, or key Origin point of the move. destination : array-like[2] of int or float, Port, key, or None Destination point of the move. axis : {'x', 'y'} Direction of move. Returns ------- dx : int or float Change in position in the x-direction. dy : int or float Change in position in the y-direction. """ # If only one set of coordinates is defined, make sure it's used to move things if destination is None: destination = origin origin = [0,0] d = _parse_coordinate(destination) o = _parse_coordinate(origin) if axis == 'x': d = (d[0], o[1]) if axis == 'y': d = (o[0], d[1]) dx,dy = np.array(d) - o return dx,dy def _distribute(elements, direction = 'x', spacing = 100, separation = True, edge = None): """ Takes a list of elements and distributes them either equally along a grid or with a fixed spacing between them. Parameters ---------- elements : array-like of PHIDL objects Elements to distribute. direction : {'x', 'y'} Direction of distribution; either a line in the x-direction or y-direction. spacing : int or float Distance between elements. separation : bool If True, guarantees elements are speparated with a fixed spacing between; if False, elements are spaced evenly along a grid. edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'} Which edge to perform the distribution along (unused if separation == True) Returns ------- elements : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group Distributed elements. 
""" if len(elements) == 0: return elements if direction not in ({'x','y'}): raise ValueError("[PHIDL] distribute(): 'direction' argument must be either 'x' or'y'") if (direction == 'x') and (edge not in ({'x', 'xmin', 'xmax'})) and (separation == False): raise ValueError("[PHIDL] distribute(): When `separation` == False and direction == 'x'," + " the `edge` argument must be one of {'x', 'xmin', 'xmax'}") if (direction == 'y') and (edge not in ({'y', 'ymin', 'ymax'})) and (separation == False): raise ValueError("[PHIDL] distribute(): When `separation` == False and direction == 'y'," + " the `edge` argument must be one of {'y', 'ymin', 'ymax'}") if (direction == 'y'): sizes = [e.ysize for e in elements] if (direction == 'x'): sizes = [e.xsize for e in elements] spacing = np.array([spacing]*len(elements)) if separation == True: # Then `edge` doesn't apply if direction == 'x': edge = 'xmin' if direction == 'y': edge = 'ymin' else: sizes = np.zeros(len(spacing)) # Calculate new positions and move each element start = elements[0].__getattribute__(edge) positions = np.cumsum(np.concatenate(([start], (spacing + sizes)))) for n, e in enumerate(elements): e.__setattr__(edge, positions[n]) return elements def _align(elements, alignment = 'ymax'): """ Aligns lists of PHIDL elements Parameters ---------- elements : array-like of PHIDL objects Elements to align. alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'} Which edge to align along (e.g. 'ymax' will align move the elements such that all of their topmost points are aligned) Returns ------- elements : array-like of PHIDL objects Aligned elements. """ if len(elements) == 0: return elements if alignment not in (['x','y','xmin', 'xmax', 'ymin','ymax']): raise ValueError("[PHIDL] 'alignment' argument must be one of 'x','y','xmin', 'xmax', 'ymin','ymax'") value = Group(elements).__getattribute__(alignment) for e in elements: e.__setattr__(alignment, value) return elements def _line_distances(points, start, end): if np.all(start == end): return np.linalg.norm(points - start, axis=1) vec = end - start cross = np.cross(vec, start - points) return np.divide(abs(cross), np.linalg.norm(vec)) def _simplify(points, tolerance=0): """ Ramer–Douglas–Peucker algorithm for line simplification. Takes an array of points of shape (N,2) and removes excess points in the line. The remaining points form a identical line to within `tolerance` from the original """ # From https://github.com/fhirschmann/rdp/issues/7 # originally written by Kirill Konevets https://github.com/kkonevets M = np.asarray(points) start, end = M[0], M[-1] dists = _line_distances(M, start, end) index = np.argmax(dists) dmax = dists[index] if dmax > tolerance: result1 = _simplify(M[:index + 1], tolerance) result2 = _simplify(M[index:], tolerance) result = np.vstack((result1[:-1], result2)) else: result = np.array([start, end]) return result def reset(): """ Resets the built-in Layer dictionary (controls the coloring in quickplot() ), and sets the Device universal ID (uid) to zero. """ Layer.layer_dict = {} Device._next_uid = 0 class LayerSet(object): """ Set of layer objects. """ def __init__(self): """ Initialises an empty LayerSet. """ self._layers = {} def add_layer(self, name = 'unnamed', gds_layer = 0, gds_datatype = 0, description = None, color = None, inverted = False, alpha = 0.6, dither = None): """ Adds a layer to an existing LayerSet object. Parameters ---------- name : str Name of the Layer. gds_layer : int GDSII Layer number. gds_datatype : int GDSII datatype. 
description : str Layer description. color : str Hex code of color for the Layer. inverted : bool If true, inverts the Layer. alpha : int or float Alpha parameter (opacity) for the Layer, value must be between 0.0 and 1.0. dither : str KLayout dither style (only used in phidl.utilities.write_lyp() ) """ new_layer = Layer(gds_layer = gds_layer, gds_datatype = gds_datatype, name = name, description = description, inverted = inverted, color = color, alpha = alpha, dither = dither) if name in self._layers: raise ValueError('[PHIDL] LayerSet: Tried to add layer named ' '"%s"' % (name) + ', but a layer with that ' 'name already exists in this LayerSet') else: self._layers[name] = new_layer def __getitem__(self, val): """ If you have a LayerSet `ls`, allows access to the layer names like ls['gold2']. Parameters ---------- val : str Layer name to access within the LayerSet. Returns ------- self._layers[val] : Layer Accessed Layer in the LayerSet. """ try: return self._layers[val] except: raise ValueError('[PHIDL] LayerSet: Tried to access layer ' 'named "%s"' % (val) + ' which does not exist') def __repr__(self): """ Prints the number of Layers in the LayerSet object. """ return ('LayerSet (%s layers total)' % (len(self._layers))) class Layer(object): """ Layer object. Parameters ---------- gds_layer : int GDSII Layer number. gds_datatype : int GDSII datatype. name : str Name of the Layer. color : str Hex code of color for the Layer. alpha : int or float Alpha parameter (opacity) for the Layer. dither : str KLayout dither parameter (texture) for the Layer (only used in phidl.utilities.write_lyp) """ layer_dict = {} def __init__(self, gds_layer = 0, gds_datatype = 0, name = 'unnamed', description = None, inverted = False, color = None, alpha = 0.6, dither = None): if isinstance(gds_layer, Layer): l = gds_layer # We were actually passed Layer(mylayer), make a copy gds_datatype = l.gds_datatype name = l.name description = l.description alpha = l.alpha dither = l.dither inverted = l.inverted gds_layer = l.gds_layer self.gds_layer = gds_layer self.gds_datatype = gds_datatype self.name = name self.description = description self.inverted = inverted self.alpha = alpha self.dither = dither try: if color is None: # not specified self.color = None elif np.size(color) == 3: # in format (0.5, 0.5, 0.5) color = np.array(color) if np.any(color > 1) or np.any(color < 0): raise ValueError color = np.array(np.round(color*255), dtype = int) self.color = "#{:02x}{:02x}{:02x}".format(*color) elif color[0] == '#': # in format #1d2e3f if len(color) != 7: raise ValueError int(color[1:],16) # Will throw error if not hex format self.color = color else: # in named format 'gold' self.color = _CSS3_NAMES_TO_HEX[color.lower()] except: raise ValueError("[PHIDL] Layer() color must be specified as a " + "0-1 RGB triplet, (e.g. [0.5, 0.1, 0.9]), an HTML hex color string " + "(e.g. '#a31df4'), or a CSS3 color name (e.g. 'gold' or " + "see http://www.w3schools.com/colors/colors_names.asp )") Layer.layer_dict[(gds_layer, gds_datatype)] = self def __repr__(self): """ Prints a description of the Layer object, including the name, GDS layer, GDS datatype, description, and color of the Layer. """ return ('Layer (name %s, GDS layer %s, GDS datatype %s, description %s, color %s)' % \ (self.name, self.gds_layer, self.gds_datatype, self.description, self.color)) def _parse_layer(layer): """ Check if the variable layer is a Layer object, a 2-element list like [0, 1] representing layer = 0 and datatype = 1, or just a layer number. 
Parameters ---------- layer : int, array-like[2], or set Variable to check. Returns ------- (gds_layer, gds_datatype) : array-like[2] The layer number and datatype of the input. """ if isinstance(layer, Layer): gds_layer, gds_datatype = layer.gds_layer, layer.gds_datatype elif np.shape(layer) == (2,): # In form [3,0] gds_layer, gds_datatype = layer[0], layer[1] elif np.shape(layer) == (1,): # In form [3] gds_layer, gds_datatype = layer[0], 0 elif layer is None: gds_layer, gds_datatype = 0, 0 elif isinstance(layer, (int, float)): gds_layer, gds_datatype = layer, 0 else: raise ValueError("""[PHIDL] _parse_layer() was passed something that could not be interpreted as a layer: layer = %s""" % layer) return (gds_layer, gds_datatype) class _GeometryHelper(object): """ This is a helper class. It can be added to any other class which has the functions move() and the property ``bbox`` (as in self.bbox). It uses that function+property to enable you to do things like check what the center of the bounding box is (self.center), and also to do things like move the bounding box such that its maximum x value is 5.2 (self.xmax = 5.2). """ @property def center(self): """ Returns the center of the bounding box. """ return np.sum(self.bbox,0)/2 @center.setter def center(self, destination): """ Sets the center of the bounding box. Parameters ---------- destination : array-like[2] Coordinates of the new bounding box center. """ self.move(destination = destination, origin = self.center) @property def x(self): """ Returns the x-coordinate of the center of the bounding box. """ return np.sum(self.bbox,0)[0]/2 @x.setter def x(self, destination): """ Sets the x-coordinate of the center of the bounding box. Parameters ---------- destination : int or float x-coordinate of the bbox center. """ destination = (destination, self.center[1]) self.move(destination = destination, origin = self.center, axis = 'x') @property def y(self): """ Returns the y-coordinate of the center of the bounding box. """ return np.sum(self.bbox,0)[1]/2 @y.setter def y(self, destination): """ Sets the y-coordinate of the center of the bounding box. Parameters ---------- destination : int or float y-coordinate of the bbox center. """ destination = (self.center[0], destination) self.move(destination = destination, origin = self.center, axis = 'y') @property def xmax(self): """ Returns the maximum x-value of the bounding box. """ return self.bbox[1][0] @xmax.setter def xmax(self, destination): """ Sets the x-coordinate of the maximum edge of the bounding box. Parameters ---------- destination : int or float x-coordinate of the maximum edge of the bbox. """ self.move(destination = (destination, 0), origin = self.bbox[1], axis = 'x') @property def ymax(self): """ Returns the maximum y-value of the bounding box. """ return self.bbox[1][1] @ymax.setter def ymax(self, destination): """ Sets the y-coordinate of the maximum edge of the bounding box. Parameters ---------- destination : int or float y-coordinate of the maximum edge of the bbox. """ self.move(destination = (0, destination), origin = self.bbox[1], axis = 'y') @property def xmin(self): """ Returns the minimum x-value of the bounding box. """ return self.bbox[0][0] @xmin.setter def xmin(self, destination): """ Sets the x-coordinate of the minimum edge of the bounding box. Parameters ---------- destination : int or float x-coordinate of the minimum edge of the bbox. 
""" self.move(destination = (destination, 0), origin = self.bbox[0], axis = 'x') @property def ymin(self): """ Returns the minimum y-value of the bounding box. """ return self.bbox[0][1] @ymin.setter def ymin(self, destination): """ Sets the y-coordinate of the minimum edge of the bounding box. Parameters ---------- destination : int or float y-coordinate of the minimum edge of the bbox. """ self.move(destination = (0, destination), origin = self.bbox[0], axis = 'y') @property def size(self): """ Returns the (x, y) size of the bounding box. """ bbox = self.bbox return bbox[1] - bbox[0] @property def xsize(self): """ Returns the horizontal size of the bounding box. """ bbox = self.bbox return bbox[1][0] - bbox[0][0] @property def ysize(self): """ Returns the vertical size of the bounding box. """ bbox = self.bbox return bbox[1][1] - bbox[0][1] def movex(self, origin = 0, destination = None): """ Moves an object by a specified x-distance. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, key, or None Destination point of the move. """ if destination is None: destination = origin origin = 0 self.move(origin = (origin, 0), destination = (destination, 0)) return self def movey(self, origin = 0, destination = None): """ Moves an object by a specified y-distance. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. """ if destination is None: destination = origin origin = 0 self.move(origin = (0, origin), destination = (0, destination)) return self def __add__(self, element): """ Adds an element to a Group. Parameters ---------- element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group Element to add. """ if isinstance(self, Group): G = Group() G.add(self.elements) G.add(element) else: G = Group([self, element]) return G class Port(object): """ Port object that can be used to easily snap together other geometric objects Parameters ---------- name : str Name of the Port object. midpoint : array-like[2] of int or float Midpoint of the Port location. width : int or float Width of the Port. orientation : int or float Orientation (rotation) of the Port. parent : """ _next_uid = 0 def __init__(self, name = None, midpoint = (0, 0), width = 1, orientation = 0, parent = None): self.name = name self.midpoint = np.array(midpoint, dtype = 'float64') self.width = width self.orientation = mod(orientation, 360) self.parent = parent self.info = {} self.uid = Port._next_uid if self.width < 0: raise ValueError('[PHIDL] Port creation ' 'error: width must be >=0') Port._next_uid += 1 def __repr__(self): """ Prints a description of the Port object, including the name, midpoint, width, and orientation of the Port. """ return ('Port (name %s, midpoint %s, width %s, orientation %s)' % \ (self.name, self.midpoint, self.width, self.orientation)) @property def endpoints(self): """ Returns the endpoints of the Port. """ dxdy = np.array([ self.width/2*cos((self.orientation - 90) * pi/180), self.width/2*sin((self.orientation - 90) * pi/180) ]) left_point = self.midpoint - dxdy right_point = self.midpoint + dxdy return np.array([left_point, right_point]) @endpoints.setter def endpoints(self, points): """ Sets the endpoints of a Port. Parameters ---------- points : array-like[2] of int or float Endpoints to assign to the Port. 
""" p1, p2 = np.array(points[0]), np.array(points[1]) self.midpoint = (p1+p2)/2 dx, dy = p2-p1 self.orientation = np.arctan2(dx, -dy) * 180/pi self.width = sqrt(dx**2 + dy**2) @property def normal(self): """ Returns a vector normal to the Port Returns ------- array-like[2] Vector normal to the Port """ dx = cos((self.orientation) * pi/180) dy = sin((self.orientation) * pi/180) return np.array([self.midpoint, self.midpoint + np.array([dx, dy])]) @property def x(self): """ Returns the x-coordinate of the Port midpoint. """ return self.midpoint[0] @property def y(self): """ Returns the y-coordinate of the Port midpoint. """ return self.midpoint[1] @property def center(self): """ Returns the midpoint of the Port. """ return self.midpoint def _copy(self, new_uid = True): """ Copies a Port. Returns ------- Port Copied Port. Notes ----- Use this function instead of copy() (which will not create a new numpy array for self.midpoint) or deepcopy() (which will also deepcopy the self.parent DeviceReference recursively, causing performance issues). """ new_port = Port(name = self.name, midpoint = self.midpoint, width = self.width, orientation = self.orientation, parent = self.parent) new_port.info = deepcopy(self.info) if new_uid == False: new_port.uid = self.uid Port._next_uid -= 1 return new_port def rotate(self, angle = 45, center = None): """ Rotates a Port around the specified center point, if no centerpoint specified will rotate around (0,0). Parameters ---------- angle : int or float Angle to rotate the Port in degrees. center : array-like[2] or None Midpoint of the Port. """ self.orientation = mod(self.orientation + angle, 360) if center is None: center = self.midpoint self.midpoint = _rotate_points(self.midpoint, angle = angle, center = center) return self class Polygon(gdspy.Polygon, _GeometryHelper): """ Polygonal geometric object. Parameters ---------- points : array-like[N][2] Coordinates of the vertices of the Polygon. gds_layer : int GDSII layer of the Polygon. gds_datatype : int GDSII datatype of the Polygon. parent : """ def __init__(self, points, gds_layer, gds_datatype, parent): self.parent = parent super(Polygon, self).__init__(points = points, layer = gds_layer, datatype = gds_datatype) @property def bbox(self): """ Returns the bounding box of the Polygon. """ return self.get_bounding_box() def rotate(self, angle = 45, center = (0,0)): """ Rotates a Polygon by the specified angle. Parameters ---------- angle : int or float Angle to rotate the Polygon in degrees. center : array-like[2] or None Midpoint of the Polygon. """ super(Polygon, self).rotate(angle = angle*pi/180, center = center) if self.parent is not None: self.parent._bb_valid = False return self def move(self, origin = (0,0), destination = None, axis = None): """ Moves elements of the Device from the origin point to the destination. Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this device. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. axis : {'x', 'y'} Direction of move. """ dx,dy = _parse_move(origin, destination, axis) super(Polygon, self).translate(dx, dy) if self.parent is not None: self.parent._bb_valid = False return self def mirror(self, p1 = (0,1), p2 = (0,0)): """ Mirrors a Polygon across the line formed between the two specified points. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. 
Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ for n, points in enumerate(self.polygons): self.polygons[n] = _reflect_points(points, p1, p2) if self.parent is not None: self.parent._bb_valid = False return self def reflect(self, p1 = (0,1), p2 = (0,0)): """ .. deprecated:: 1.3.0 `reflect` will be removed in May 2021, please replace with `mirror`. """ warnings.warn('[PHIDL] Warning: reflect() will be deprecated in May 2021, please replace with mirror()') return self.mirror(p1, p2) def simplify(self, tolerance = 1e-3): """ Removes points from the polygon but does not change the polygon shape by more than `tolerance` from the original. Uses the Ramer-Douglas-Peucker algorithm. Parameters ---------- tolerance : float Tolerance value for the simplification algorithm. All points that can be removed without changing the resulting polygon by more than the value listed here will be removed. Also known as `epsilon` here https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm """ for n, points in enumerate(self.polygons): self.polygons[n] = _simplify(points, tolerance = tolerance) if self.parent is not None: self.parent._bb_valid = False return self def make_device(fun, config = None, **kwargs): """ Makes a Device from a function. Parameters ---------- fun : str Name of the function to make the Device with. config : dict or None A dictionary containing arguments for the given function. Returns ------- D : Device A Device constructed from the specified function. """ config_dict = {} if type(config) is dict: config_dict = dict(config) elif config is None: pass else: raise TypeError("""[PHIDL] When creating Device() from a function, the second argument should be a ``config`` argument which is a dictionary containing arguments for the function. e.g. make_device(ellipse, config = ellipse_args_dict) """) config_dict.update(**kwargs) D = fun(**config_dict) if not isinstance(D, Device): raise ValueError("""[PHIDL] Device() was passed a function, but that function does not produce a Device.""") return D class Device(gdspy.Cell, _GeometryHelper): """ The basic object that holds polygons, labels, and ports in PHIDL """ _next_uid = 0 def __init__(self, *args, **kwargs): if len(args) > 0: if callable(args[0]): raise ValueError('[PHIDL] You can no longer create geometry ' 'by calling Device(device_making_function), please use ' 'make_device(device_making_function) instead') # Allow name to be set like Device('arc') or Device(name = 'arc') if 'name' in kwargs: name = kwargs['name'] elif (len(args) == 1) and (len(kwargs) == 0): name = args[0] else: name = 'Unnamed' # Make a new blank device self.ports = {} self.info = {} self.aliases = {} # self.a = self.aliases # self.p = self.ports self.uid = Device._next_uid super(Device, self).__init__(name = name) Device._next_uid += 1 def __getitem__(self, key): """ If you have a Device D, allows access to aliases you made like D['arc2']. Parameters ---------- key : str Element name to access within the Device. Returns ------- self._layers[val] : Layer Accessed element in the Device. """ try: return self.aliases[key] except: raise ValueError('[PHIDL] Tried to access alias "%s" in Device ' '"%s", which does not exist' % (key, self.name)) def __repr__(self): """ Prints a description of the Device, including the name, uid, ports, aliases, polygons, and references. 
""" return ('Device (name "%s" (uid %s), ports %s, aliases %s, %s ' 'polygons, %s references)' % \ (self.name, self.uid, list(self.ports.keys()), list(self.aliases.keys()), len(self.polygons), len(self.references))) def __str__(self): """ Prints a description of the Device, including the name, uid, ports, aliases, polygons, and references. """ return self.__repr__() def __lshift__(self, element): """ Convenience operators equivalent to add_ref() Parameters ---------- elements : Device Element to reference """ return self.add_ref(element) def __setitem__(self, key, element): """ Allow adding polygons and cell references like D['arc3'] = pg.arc() Parameters ---------- key : Alias name element : Object that will be accessible by alias name Returns ------- """ if isinstance(element, (DeviceReference,Polygon,CellArray)): self.aliases[key] = element else: raise ValueError('[PHIDL] Tried to assign alias "%s" in ' 'Device "%s", but failed because the item was ' 'not a DeviceReference' % (key, self.name)) @property def layers(self): """ Returns a set of the Layers in the Device. """ return self.get_layers() # @property # def references(self): # return [e for e in self.elements if isinstance(e, DeviceReference)] # @property # def polygons(self): # return [e for e in self.elements if isinstance(e, gdspy.PolygonSet)] @property def bbox(self): """ Returns the bounding box of the Device. """ bbox = self.get_bounding_box() if bbox is None: bbox = ((0,0),(0,0)) return np.array(bbox) def add_ref(self, device, alias = None): """ Takes a Device and adds it as a DeviceReference to the current Device. Parameters ---------- device : Device Device to be added as a DeviceReference. alias : str Alias of the Device. Returns ------- d : DeviceReference A DeviceReference that is added to the current Device. """ if _is_iterable(device): return [self.add_ref(E) for E in device] if not isinstance(device, Device): raise TypeError("""[PHIDL] add_ref() was passed something that was not a Device object. """) d = DeviceReference(device) # Create a DeviceReference (CellReference) d.owner = self self.add(d) # Add DeviceReference (CellReference) to Device (Cell) if alias is not None: self.aliases[alias] = d return d # Return the DeviceReference (CellReference) def add_polygon(self, points, layer = np.nan): """ Adds a Polygon to the Device. Parameters ---------- points : array-like[N][2] Coordinates of the vertices of the Polygon. layer : int, array-like[2], or set Specific layer(s) to put polygon geometry on. """ if layer is None: return None # Check if input a list of polygons by seeing if it's 3 levels deep try: points[0][0][0] # Try to access first x point return [self.add_polygon(p, layer) for p in points] except: pass # Verified points is not a list of polygons, continue on if isinstance(points, gdspy.PolygonSet): if layer is np.nan: layers = zip(points.layers, points.datatypes) else: layers = [layer]*len(points.polygons) return [self.add_polygon(p, layer) for p, layer in zip(points.polygons, layers)] if layer is np.nan: layer = 0 # Check if layer is actually a list of Layer objects try: if isinstance(layer, LayerSet): return [self.add_polygon(points, l) for l in layer._layers.values()] elif isinstance(layer, set): return [self.add_polygon(points, l) for l in layer] elif all([isinstance(l, (Layer)) for l in layer]): return [self.add_polygon(points, l) for l in layer] elif len(layer) > 2: # Someone wrote e.g. layer = [1,4,5] raise ValueError(""" [PHIDL] If specifying multiple layers you must use set notation, e.g. 
{1,5,8} """) except: pass # If in the form [[1,3,5],[2,4,6]] if len(points[0]) > 2: # Convert to form [[1,2],[3,4],[5,6]] points = np.column_stack((points)) gds_layer, gds_datatype = _parse_layer(layer) polygon = Polygon(points = points, gds_layer = gds_layer, gds_datatype = gds_datatype, parent = self) self.add(polygon) return polygon def add_array(self, device, columns = 2, rows = 2, spacing = (100, 100), alias = None): """ Creates a CellArray reference to a Device. Parameters ---------- device : Device The referenced Device. columns : int Number of columns in the array. rows : int Number of rows in the array. spacing : array-like[2] of int or float Distances between adjacent columns and adjacent rows. alias : str or None Alias of the referenced Device. Returns ------- a : CellArray A CellArray containing references to the input Device. """ if not isinstance(device, Device): raise TypeError("""[PHIDL] add_array() was passed something that was not a Device object. """) a = CellArray(device = device, columns = int(round(columns)), rows = int(round(rows)), spacing = spacing) a.owner = self self.add(a) # Add DeviceReference (CellReference) to Device (Cell) if alias is not None: self.aliases[alias] = a return a # Return the CellArray def add_port(self, name = None, midpoint = (0,0), width = 1, orientation = 45, port = None): """ Adds a Port to the Device. Parameters ---------- name : str Name of the Port object. midpoint : array-like[2] of int or float Midpoint of the Port location. width : int or float Width of the Port. orientation : int or float Orientation (rotation) of the Port. port : Port or None A Port if the added Port is a copy of an existing Port. Notes ----- Can be called to copy an existing port like add_port(port = existing_port) or to create a new port add_port(myname, mymidpoint, mywidth, myorientation). Can also be called to copy an existing port with a new name like add_port(port = existing_port, name = new_name) """ if port is not None: if not isinstance(port, Port): raise ValueError('[PHIDL] add_port() error: Argument `port` must be a Port for copying') p = port._copy(new_uid = True) p.parent = self elif isinstance(name, Port): p = name._copy(new_uid = True) p.parent = self name = p.name else: p = Port(name = name, midpoint = midpoint, width = width, orientation = orientation, parent = self) if name is not None: p.name = name if p.name in self.ports: raise ValueError('[DEVICE] add_port() error: Port name "%s" already exists in this Device (name "%s", uid %s)' % (p.name, self.name, self.uid)) self.ports[p.name] = p return p def add_label(self, text = 'hello', position = (0,0), magnification = None, rotation = None, anchor = 'o', layer = 255): """ Adds a Label to the Device. Parameters ---------- text : str Label text. position : array-like[2] x-, y-coordinates of the Label location. magnification : int, float, or None Magnification factor for the Label text. rotation : int, float, or None Angle rotation of the Label text. anchor : {'n', 'e', 's', 'w', 'o', 'ne', 'nw', ...} Position of the anchor relative to the text. layer : int, array-like[2], or set Specific layer(s) to put Label on. 
""" if layer is None: return None if len(text) >= 1023: raise ValueError('[DEVICE] label() error: Text too long (limit 1024 chars)') gds_layer, gds_datatype = _parse_layer(layer) if type(text) is not str: text = str(text) l = Label(text = text, position = position, anchor = anchor, magnification = magnification, rotation = rotation, layer = gds_layer, texttype = gds_datatype) self.add(l) return l def label(self, *args, **kwargs): """ .. deprecated:: 1.3.0 `label` will be removed, please replace with `add_label`. """ warnings.warn('[PHIDL] WARNING: label() will be deprecated, please replace with add_label()') return self.add_label(*args, **kwargs) def write_gds(self, filename, unit = 1e-6, precision = 1e-9, auto_rename = True, max_cellname_length = 28, cellname = 'toplevel'): """ Writes a Device to a GDS file. Parameters ---------- filename : str or file The GDS file to write to. unit : int or float Unit size for the objects in the library (in `meters`). precision : float Precision for the dimensions of the objects in the library (in `meters`). auto_rename : bool If True, fixes any duplicate cell names. max_cellname_length : int or None If given, and if `auto_rename` is True, enforces a limit on the length of the fixed duplicate cellnames. cellname : str Name of the top-level cell in the saved GDS Returns ------- """ # If string, try to append ".gds" to the end, otherwise leave alone try: if filename[-4:] != '.gds': filename += '.gds' except: pass referenced_cells = list(self.get_dependencies(recursive=True)) all_cells = [self] + referenced_cells # Autofix names so there are no duplicates if auto_rename == True: all_cells_sorted = sorted(all_cells, key=lambda x: x.uid) all_cells_original_names = [c.name for c in all_cells_sorted] used_names = {cellname} n = 1 for c in all_cells_sorted: if max_cellname_length is not None: new_name = c.name[:max_cellname_length] else: new_name = c.name temp_name = new_name while temp_name in used_names: n += 1 temp_name = new_name + ('%0.3i' % n) new_name = temp_name used_names.add(new_name) c.name = new_name self.name = cellname # Write the gds lib = gdspy.GdsLibrary(unit=unit, precision=precision) lib.write_gds(filename, cells=all_cells) # Return cells to their original names if they were auto-renamed if auto_rename == True: for n,c in enumerate(all_cells_sorted): c.name = all_cells_original_names[n] return filename def remap_layers(self, layermap = {}, include_labels = True): """ Moves all polygons in the Device from one layer to another according to the layermap argument. Parameters ---------- layermap : dict Dictionary of values in format {layer_from : layer_to} include_labels : bool Selects whether to move Labels along with polygons """ layermap = {_parse_layer(k):_parse_layer(v) for k,v in layermap.items()} all_D = list(self.get_dependencies(True)) all_D += [self] for D in all_D: for p in D.polygons: for n, layer in enumerate(p.layers): original_layer = (p.layers[n], p.datatypes[n]) original_layer = _parse_layer(original_layer) if original_layer in layermap.keys(): new_layer = layermap[original_layer] p.layers[n] = new_layer[0] p.datatypes[n] = new_layer[1] if include_labels == True: for l in D.labels: original_layer = (l.layer, l.texttype) original_layer = _parse_layer(original_layer) if original_layer in layermap.keys(): new_layer = layermap[original_layer] l.layer = new_layer[0] l.texttype = new_layer[1] return self def remove_layers(self, layers = (), include_labels = True, invert_selection = False): """ Removes layers from a Device. 
Parameters ---------- layers : int, array-like[2], or set Specific layer(s) to remove. include_labels : bool If True, keeps the labels corresponding to the input layers. invert_selection : bool If True, removes all layers except those specified. """ layers = [_parse_layer(l) for l in layers] all_D = list(self.get_dependencies(True)) all_D += [self] for D in all_D: for polygonset in D.polygons: polygon_layers = zip(polygonset.layers, polygonset.datatypes) polygons_to_keep = [(pl in layers) for pl in polygon_layers] if invert_selection == False: polygons_to_keep = [(not p) for p in polygons_to_keep] polygonset.polygons = [p for p,keep in zip(polygonset.polygons, polygons_to_keep) if keep] polygonset.layers = [p for p,keep in zip(polygonset.layers, polygons_to_keep) if keep] polygonset.datatypes = [p for p,keep in zip(polygonset.datatypes, polygons_to_keep) if keep] if include_labels == True: new_labels = [] for l in D.labels: original_layer = (l.layer, l.texttype) original_layer = _parse_layer(original_layer) if invert_selection: keep_layer = (original_layer in layers) else: keep_layer = (original_layer not in layers) if keep_layer: new_labels += [l] D.labels = new_labels return self def distribute(self, elements = 'all', direction = 'x', spacing = 100, separation = True, edge = 'center'): """ Distributes the specified elements in the Device. Parameters ---------- elements : array-like of PHIDL objects or 'all' Elements to distribute. direction : {'x', 'y'} Direction of distribution; either a line in the x-direction or y-direction. spacing : int or float Distance between elements. separation : bool If True, guarantees elements are speparated with a fixed spacing between; if False, elements are spaced evenly along a grid. edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'} Which edge to perform the distribution along (unused if separation == True) """ if elements == 'all': elements = (self.polygons + self.references) _distribute(elements = elements, direction = direction, spacing = spacing, separation = separation, edge = edge) return self def align(self, elements = 'all', alignment = 'ymax'): """ Align elements in the Device Parameters ---------- elements : array-like of PHIDL objects, or 'all' Elements in the Device to align. alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'} Which edge to align along (e.g. 'ymax' will move the elements such that all of their topmost points are aligned) """ if elements == 'all': elements = (self.polygons + self.references) _align(elements, alignment = alignment) return self def flatten(self, single_layer = None): """ Flattens the heirarchy of the Device such that there are no longer any references to other Devices. All polygons and labels from underlying references are copied and placed in the top-level Device. If single_layer is specified, all polygons are moved to that layer. 
Parameters ---------- single_layer : None, int, tuple of int, or set of int If not None, all polygons are moved to the specified """ if single_layer is None: super(Device, self).flatten(single_layer = None, single_datatype = None, single_texttype = None) else: gds_layer, gds_datatype = _parse_layer(single_layer) super(Device, self).flatten(single_layer = gds_layer, single_datatype = gds_datatype, single_texttype = gds_datatype) temp_polygons = list(self.polygons) self.references = [] self.polygons = [] [self.add_polygon(poly) for poly in temp_polygons] return self def absorb(self, reference): """ Flattens and absorbs polygons from an underlying DeviceReference into the Device, destroying the reference in the process but keeping the polygon geometry. Parameters ---------- reference : DeviceReference DeviceReference to be absorbed into the Device. """ if reference not in self.references: raise ValueError("""[PHIDL] Device.absorb() failed - the reference it was asked to absorb does not exist in this Device. """) ref_polygons = reference.get_polygons(by_spec = True) for (layer, polys) in ref_polygons.items(): [self.add_polygon(points = p, layer = layer) for p in polys] self.remove(reference) return self def get_ports(self, depth = None): """ Returns copies of all the ports of the Device, rotated and translated so that they're in their top-level position. The Ports returned are copies of the originals, but each copy has the same ``uid`` as the original so that they can be traced back to the original if needed. Parameters ---------- depth : int or None If not None, defines from how many reference levels to retrieve Ports from. Returns ------- port_list : list of Port List of all Ports in the Device. """ port_list = [p._copy(new_uid = False) for p in self.ports.values()] if depth is None or depth > 0: for r in self.references: if depth is None: new_depth = None else: new_depth = depth - 1 ref_ports = r.parent.get_ports(depth=new_depth) # Transform ports that came from a reference ref_ports_transformed = [] for rp in ref_ports: new_port = rp._copy(new_uid = False) new_midpoint, new_orientation = r._transform_port(rp.midpoint, \ rp.orientation, r.origin, r.rotation, r.x_reflection) new_port.midpoint = new_midpoint new_port.new_orientation = new_orientation ref_ports_transformed.append(new_port) port_list += ref_ports_transformed return port_list def remove(self, items): """ Removes items from a Device, which can include Ports, PolygonSets, CellReferences, and Labels. Parameters ---------- items : array-like[N] Items to be removed from the Device. """ if not _is_iterable(items): items = [items] for item in items: if isinstance(item, Port): try: self.ports = { k:v for k, v in self.ports.items() if v != item} except: raise ValueError("""[PHIDL] Device.remove() cannot find the Port it was asked to remove in the Device: "%s".""" % (item)) else: try: if isinstance(item, gdspy.PolygonSet): self.polygons.remove(item) if isinstance(item, gdspy.CellReference): self.references.remove(item) if isinstance(item, gdspy.Label): self.labels.remove(item) self.aliases = { k:v for k, v in self.aliases.items() if v != item} except: raise ValueError("""[PHIDL] Device.remove() cannot find the item it was asked to remove in the Device: "%s".""" % (item)) self._bb_valid = False return self def rotate(self, angle = 45, center = (0,0)): """ Rotates all Polygons in the Device around the specified center point. Parameters ---------- angle : int or float Angle to rotate the Device in degrees. 
center : array-like[2] or None Midpoint of the Device. """ if angle == 0: return self for e in self.polygons: e.rotate(angle = angle, center = center) for e in self.references: e.rotate(angle, center) for e in self.labels: e.rotate(angle, center) for p in self.ports.values(): p.midpoint = _rotate_points(p.midpoint, angle, center) p.orientation = mod(p.orientation + angle, 360) self._bb_valid = False return self def move(self, origin = (0,0), destination = None, axis = None): """ Moves elements of the Device from the origin point to the destination. Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this Device. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. axis : {'x', 'y'} Direction of the move. """ dx,dy = _parse_move(origin, destination, axis) # Move geometries for e in self.polygons: e.translate(dx,dy) for e in self.references: e.move((dx,dy)) for e in self.labels: e.move((dx,dy)) for p in self.ports.values(): p.midpoint = np.array(p.midpoint) + np.array((dx,dy)) self._bb_valid = False return self def mirror(self, p1 = (0,1), p2 = (0,0)): """ Mirrors a Device across the line formed between the two specified points. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ for e in (self.polygons+self.references+self.labels): e.mirror(p1, p2) for p in self.ports.values(): p.midpoint = _reflect_points(p.midpoint, p1, p2) phi = np.arctan2(p2[1]-p1[1], p2[0]-p1[0])*180/pi p.orientation = 2*phi - p.orientation self._bb_valid = False return self def hash_geometry(self, precision = 1e-4): """ Computes an SHA1 hash of the geometry in the Device. For each layer, each polygon is individually hashed and then the polygon hashes are sorted, to ensure the hash stays constant regardless of the ordering the polygons. Similarly, the layers are sorted by (layer, datatype) Parameters ---------- precision : float Roudning precision for the the objects in the Device. For instance, a precision of 1e-2 will round a point at (0.124, 1.748) to (0.12, 1.75) Returns ------- str Hash result in the form of an SHA1 hex digest string Notes ----- Algorithm: .. code-block:: python hash( hash(First layer information: [layer1, datatype1]), hash(Polygon 1 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ), hash(Polygon 2 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ), hash(Polygon 3 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ), hash(Second layer information: [layer2, datatype2]), hash(Polygon 1 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ), hash(Polygon 2 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3)] ), ) """ polygons_by_spec = self.get_polygons(by_spec = True) layers = np.array(list(polygons_by_spec.keys())) sorted_layers = layers[np.lexsort((layers[:,0], layers[:,1]))] # A random offset which fixes common rounding errors intrinsic # to floating point math. 
Example: with a precision of 0.1, the # floating points 7.049999 and 7.050001 round to different values # (7.0 and 7.1), but offset values (7.220485 and 7.220487) don't magic_offset = .17048614 final_hash = hashlib.sha1() for layer in sorted_layers: layer_hash = hashlib.sha1(layer.astype(np.int64)).digest() polygons = polygons_by_spec[tuple(layer)] polygons = [np.ascontiguousarray((p/precision) + magic_offset, dtype = np.int64) for p in polygons] polygon_hashes = np.sort([hashlib.sha1(p).digest() for p in polygons]) final_hash.update(layer_hash) for ph in polygon_hashes: final_hash.update(ph) return final_hash.hexdigest() class DeviceReference(gdspy.CellReference, _GeometryHelper): """ Simple reference to an existing Device. Parameters ---------- device : Device The referenced Device. origin : array-like[2] of int or float Position where the Device is inserted. rotation : int or float Angle of rotation of the reference (in `degrees`) magnification : int or float Magnification factor for the reference. x_reflection : bool If True, the reference is reflected parallel to the x-direction before being rotated. """ def __init__(self, device, origin=(0, 0), rotation=0, magnification=None, x_reflection=False): super(DeviceReference, self).__init__( ref_cell = device, origin=origin, rotation=rotation, magnification=magnification, x_reflection=x_reflection, ignore_missing=False) self.parent = device self.owner = None # The ports of a DeviceReference have their own unique id (uid), # since two DeviceReferences of the same parent Device can be # in different locations and thus do not represent the same port self._local_ports = {name:port._copy(new_uid = True) for name, port in device.ports.items()} def __repr__(self): """ Prints a description of the DeviceReference, including parent Device, ports, origin, rotation, and x_reflection. """ return ('DeviceReference (parent Device "%s", ports %s, origin %s, rotation %s, x_reflection %s)' % \ (self.parent.name, list(self.ports.keys()), self.origin, self.rotation, self.x_reflection)) def __str__(self): """ Prints a description of the DeviceReference, including parent Device, ports, origin, rotation, and x_reflection. """ return self.__repr__() def __getitem__(self, val): """ This allows you to access an alias from the reference's parent and receive a copy of the reference which is correctly rotated and translated. Parameters ---------- val : str Alias from the reference's parent to be accessed. Returns ------- new_reference : DeviceReference DeviceReference for the copied parent reference. """ try: alias_device = self.parent[val] except: raise ValueError('[PHIDL] Tried to access alias "%s" from parent ' 'Device "%s", which does not exist' % (val, self.parent.name)) new_reference = DeviceReference(alias_device.parent, origin=alias_device.origin, rotation=alias_device.rotation, magnification=alias_device.magnification, x_reflection=alias_device.x_reflection) if self.x_reflection: new_reference.mirror((1,0)) if self.rotation is not None: new_reference.rotate(self.rotation) if self.origin is not None: new_reference.move(self.origin) return new_reference @property def ports(self): """ This property allows you to access myref.ports, and receive a copy of the ports dict which is correctly rotated and translated. 
""" for name, port in self.parent.ports.items(): port = self.parent.ports[name] new_midpoint, new_orientation = self._transform_port(port.midpoint, \ port.orientation, self.origin, self.rotation, self.x_reflection) if name not in self._local_ports: self._local_ports[name] = port._copy(new_uid = True) self._local_ports[name].midpoint = new_midpoint self._local_ports[name].orientation = mod(new_orientation,360) self._local_ports[name].parent = self # Remove any ports that no longer exist in the reference's parent parent_names = self.parent.ports.keys() local_names = list(self._local_ports.keys()) for name in local_names: if name not in parent_names: self._local_ports.pop(name) return self._local_ports @property def info(self): """ Returns information about the properties of the reference's parent. """ return self.parent.info @property def bbox(self): """ Returns the bounding box of the DeviceReference. """ bbox = self.get_bounding_box() if bbox is None: bbox = ((0,0),(0,0)) return np.array(bbox) def _transform_port(self, point, orientation, origin = (0, 0), rotation = None, x_reflection = False): """ Applies various transformations to a Port. Parameters ---------- point : array-like[N][2] Coordinates of the Port. orientation : int, float, or None Orientation of the Port origin : array-like[2] or None If given, shifts the transformed points to the specified origin. rotation : int, float, or None Angle of rotation to apply x_reflection : bool If True, reflects the Port across the x-axis before applying rotation. Returns ------- new_point : array-like[N][2] Coordinates of the transformed Port. new_orientation : int, float, or None """ # Apply GDS-type transformations to a port (x_ref) new_point = np.array(point) new_orientation = orientation if x_reflection: new_point[1] = -new_point[1] new_orientation = -orientation if rotation is not None: new_point = _rotate_points(new_point, angle = rotation, center = [0, 0]) new_orientation += rotation if origin is not None: new_point = new_point + np.array(origin) new_orientation = mod(new_orientation, 360) return new_point, new_orientation def move(self, origin = (0, 0), destination = None, axis = None): """ Moves the DeviceReference from the origin point to the destination. Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this DeviceReference. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. axis : {'x', 'y'} Direction of move. """ dx, dy = _parse_move(origin, destination, axis) self.origin = np.array(self.origin) + np.array((dx, dy)) if self.owner is not None: self.owner._bb_valid = False return self def rotate(self, angle = 45, center = (0, 0)): """ Rotates all Polygons in the DeviceReference around the specified centerpoint. Parameters ---------- angle : int or float Angle to rotate the DeviceReference in degrees. center : array-like[2] or None Midpoint of the DeviceReference. """ if angle == 0: return self if type(center) is Port: center = center.midpoint self.rotation += angle self.origin = _rotate_points(self.origin, angle, center) if self.owner is not None: self.owner._bb_valid = False return self def mirror(self, p1 = (0, 1), p2 = (0, 0)): """ Mirrors a DeviceReference across the line formed between the two specified points. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. 
Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ if type(p1) is Port: p1 = p1.midpoint if type(p2) is Port: p2 = p2.midpoint p1 = np.array(p1); p2 = np.array(p2) # Translate so reflection axis passes through origin self.origin = self.origin - p1 # Rotate so reflection axis aligns with x-axis angle = np.arctan2((p2[1]-p1[1]), (p2[0]-p1[0])) * 180/pi self.origin = _rotate_points(self.origin, angle = -angle, center = [0, 0]) self.rotation -= angle # Reflect across x-axis self.x_reflection = not self.x_reflection self.origin[1] = -self.origin[1] self.rotation = -self.rotation # Un-rotate and un-translate self.origin = _rotate_points(self.origin, angle = angle, center = [0, 0]) self.rotation += angle self.origin = self.origin + p1 if self.owner is not None: self.owner._bb_valid = False return self def reflect(self, p1 = (0, 1), p2 = (0, 0)): """ .. deprecated:: 1.3.0 `reflect` will be removed in May 2021, please replace with `mirror`. """ warnings.warn('[PHIDL] Warning: reflect() will be deprecated in ' 'May 2021, please replace with mirror()') return self.mirror(p1, p2) def connect(self, port, destination, overlap = 0): """ Moves and rotates this object such that the the Port specified by `port` is connected (aligned and adjacent) with the Port specified by `destination` Parameters ---------- port : str or Port destination : array-like[2] overlap : int or float """ # ``port`` can either be a string with the name or an actual Port if port in self.ports: # Then ``port`` is a key for the ports dict p = self.ports[port] elif type(port) is Port: p = port else: raise ValueError('[PHIDL] connect() did not receive a Port or valid port name' + \ ' - received (%s), ports available are (%s)' % (port, tuple(self.ports.keys()))) self.rotate(angle = 180 + destination.orientation - p.orientation, center = p.midpoint) self.move(origin = p, destination = destination) self.move(-overlap*np.array([cos(destination.orientation*pi/180), sin(destination.orientation*pi/180)])) return self class CellArray(gdspy.CellArray, _GeometryHelper): """ Multiple references to an existing cell in an array format. Parameters ---------- device : Device The referenced Device. columns : int Number of columns in the array. rows : int Number of rows in the array. spacing : array-like[2] of int or float Distances between adjacent columns and adjacent rows. origin : array-like[2] of int or float Position where the cell is inserted. rotation : int or float Angle of rotation of the reference (in `degrees`). magnification : int or float Magnification factor for the reference. x_reflection : bool If True, the reference is reflected parallel to the x direction before being rotated. """ def __init__(self, device, columns, rows, spacing, origin = (0, 0), rotation = 0, magnification = None, x_reflection = False): super(CellArray, self).__init__( columns = columns, rows = rows, spacing = spacing, ref_cell = device, origin = origin, rotation = rotation, magnification = magnification, x_reflection = x_reflection, ignore_missing = False) self.parent = device self.owner = None @property def bbox(self): """ Returns the bounding box of the CellArray. """ bbox = self.get_bounding_box() if bbox is None: bbox = ((0, 0), (0, 0)) return np.array(bbox) def move(self, origin = (0, 0), destination = None, axis = None): """ Moves the CellArray from the origin point to the destination. 
Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this CellArray. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. axis : {'x', 'y'} Direction of the move. """ dx, dy = _parse_move(origin, destination, axis) self.origin = np.array(self.origin) + np.array((dx, dy)) if self.owner is not None: self.owner._bb_valid = False return self def rotate(self, angle = 45, center = (0, 0)): """ Rotates all elements in the CellArray around the specified centerpoint. Parameters ---------- angle : int or float Angle to rotate the CellArray in degrees. center : array-like[2], Port, or None Midpoint of the CellArray. """ if angle == 0: return self if type(center) is Port: center = center.midpoint self.rotation += angle self.origin = _rotate_points(self.origin, angle, center) if self.owner is not None: self.owner._bb_valid = False return self def mirror(self, p1 = (0, 1), p2 = (0, 0)): """ Mirrors a CellArray across the line formed between the two specified points. Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ if type(p1) is Port: p1 = p1.midpoint if type(p2) is Port: p2 = p2.midpoint p1 = np.array(p1); p2 = np.array(p2) # Translate so reflection axis passes through origin self.origin = self.origin - p1 # Rotate so reflection axis aligns with x-axis angle = np.arctan2((p2[1]-p1[1]), (p2[0]-p1[0])) * 180/pi self.origin = _rotate_points(self.origin, angle = -angle, center = [0, 0]) self.rotation -= angle # Reflect across x-axis self.x_reflection = not self.x_reflection self.origin[1] = -self.origin[1] self.rotation = -self.rotation # Un-rotate and un-translate self.origin = _rotate_points(self.origin, angle = angle, center = [0, 0]) self.rotation += angle self.origin = self.origin + p1 if self.owner is not None: self.owner._bb_valid = False return self def reflect(self, p1 = (0, 1), p2 = (0, 0)): """ .. deprecated:: 1.3.0 `reflect` will be removed in May 2021, please replace with `mirror`. """ warnings.warn('[PHIDL] Warning: reflect() will be deprecated in ' 'May 2021, please replace with mirror()') return self.mirror(p1, p2) class Label(gdspy.Label, _GeometryHelper): """ Text that can be used to label parts of the geometry or display messages. The text does not create additional geometry, it’s meant for display and labeling purposes only. """ def __init__(self, *args, **kwargs): super(Label, self).__init__(*args, **kwargs) self.position = np.array(self.position, dtype = 'float64') @property def bbox(self): """ Returns the bounding box of the Label. """ return np.array([[self.position[0], self.position[1]], [self.position[0], self.position[1]]]) def rotate(self, angle = 45, center = (0, 0)): """ Rotates Label around the specified centerpoint. Parameters ---------- angle : int or float Angle to rotate the Label in degrees. center : array-like[2] or None Midpoint of the Label. """ self.position = _rotate_points(self.position, angle = angle, center = center) return self def move(self, origin = (0, 0), destination = None, axis = None): """ Moves the Label from the origin point to the destination. Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this Label. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. 
axis : {'x', 'y'} Direction of the move. """ dx,dy = _parse_move(origin, destination, axis) self.position += np.asarray((dx, dy)) return self def mirror(self, p1 = (0, 1), p2 = (0, 0)): """ Mirrors a Label across the line formed between the two specified points. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ self.position = _reflect_points(self.position, p1, p2) return self def reflect(self, p1 = (0, 1), p2 = (0, 0)): """ .. deprecated:: 1.3.0 `reflect` will be removed in May 2021, please replace with `mirror`. """ warnings.warn('[PHIDL] Warning: reflect() will be deprecated in ' 'May 2021, please replace with mirror()') return self.mirror(p1, p2) class Group(_GeometryHelper): """ Groups objects together so they can be manipulated as though they were a single object (move/rotate/mirror). """ def __init__(self, *args): self.elements = [] self.add(args) def __repr__(self): """ Prints the number of elements in the Group. """ return ('Group (%s elements total)' % (len(self.elements))) def __len__(self): """ Returns the number of elements in the Group. """ return len(self.elements) def __iadd__(self, element): """ Adds an element to the Group. Parameters ---------- element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group Element to be added. """ return self.add(element) @property def bbox(self): """ Returns the bounding boxes of the Group. """ if len(self.elements) == 0: raise ValueError('[PHIDL] Group is empty, no bbox is available') bboxes = np.empty([len(self.elements),4]) for n,e in enumerate(self.elements): bboxes[n] = e.bbox.flatten() bbox = ( (bboxes[:,0].min(), bboxes[:,1].min()), (bboxes[:,2].max(), bboxes[:,3].max()) ) return np.array(bbox) def add(self, element): """ Adds an element to the Group. Parameters ---------- element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group Element to add. """ if _is_iterable(element): [self.add(e) for e in element] elif element is None: return self elif isinstance(element, PHIDL_ELEMENTS): self.elements.append(element) else: raise ValueError('[PHIDL] add() Could not add element to Group, the only ' \ 'allowed element types are ' \ '(Device, DeviceReference, Polygon, CellArray, Label, Group)') # Remove non-unique entries used = set() self.elements = [x for x in self.elements if x not in used and (used.add(x) or True)] return self def rotate(self, angle = 45, center = (0,0)): """ Rotates all elements in a Group around the specified centerpoint. Parameters ---------- angle : int or float Angle to rotate the Group in degrees. center : array-like[2] or None Midpoint of the Group. """ for e in self.elements: e.rotate(angle = angle, center = center) return self def move(self, origin = (0,0), destination = None, axis = None): """ Moves the Group from the origin point to the destination. Both origin and destination can be 1x2 array-like, Port, or a key corresponding to one of the Ports in this Group. Parameters ---------- origin : array-like[2], Port, or key Origin point of the move. destination : array-like[2], Port, or key Destination point of the move. axis : {'x', 'y'} Direction of the move. """ for e in self.elements: e.move(origin = origin, destination = destination, axis = axis) return self def mirror(self, p1 = (0,1), p2 = (0,0)): """ Mirrors a Group across the line formed between the two specified points. 
``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ for e in self.elements: e.mirror(p1 = p1, p2 = p2) return self def distribute(self, direction = 'x', spacing = 100, separation = True, edge = 'center'): """ Distributes the elements in the Group. Parameters ---------- direction : {'x', 'y'} Direction of distribution; either a line in the x-direction or y-direction. spacing : int or float Distance between elements. separation : bool If True, guarantees elements are speparated with a fixed spacing between; if False, elements are spaced evenly along a grid. edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'} Which edge to perform the distribution along (unused if separation == True) """ _distribute(elements = self.elements, direction = direction, spacing = spacing, separation = separation, edge = edge) return self def align(self, alignment = 'ymax'): """ Aligns the elements in the Group. Parameters ---------- alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'} Which edge to align along (e.g. 'ymax' will align move the elements such that all of their topmost points are aligned) """ _align(elements = self.elements, alignment = alignment) return self PHIDL_ELEMENTS = (Device, DeviceReference, Polygon, CellArray, Label, Group) class Path(_GeometryHelper): """ The Path object for making smooth Paths. To be used in combination with a CrossSection to create a Device. Parameters ---------- path : array-like[N][2], Path, or list of Paths Points or Paths to append() initially """ def __init__(self, path = None): self.points = np.array([[0,0]]) self.start_angle = 0 self.end_angle = 0 self.info = {} if path is not None: self.append(path) def __len__(self): return len(self.points) @property def bbox(self): """ Returns the bounding box of the Path. """ bbox = [(np.min(self.points[:,0]), np.min(self.points[:,1])), (np.max(self.points[:,0]), np.max(self.points[:,1]))] return np.array(bbox) def append(self, path): """ Attaches the input path to the end of this object. The input path will be automatically rotated and translated such that it continues smoothly from the previous segment. 
Parameters ---------- path : Path, array-like[N][2], or list of Paths The input path that will be appended """ # If appending another Path, load relevant variables if isinstance(path, Path): start_angle = path.start_angle end_angle = path.end_angle points = path.points # If array[N][2] elif (np.ndim(path) == 2) and np.issubdtype(np.array(path).dtype, np.number) and (np.shape(path)[1] == 2): points = path nx1,ny1 = points[1] - points[0] start_angle = np.arctan2(ny1,nx1)/np.pi*180 nx2,ny2 = points[-1] - points[-2] end_angle = np.arctan2(ny2,nx2)/np.pi*180 # If list of Paths or arrays elif isinstance(path, (list, tuple)): for p in path: self.append(p) return self else: raise ValueError('[PHIDL] Path.append() the "path" argument must be either ' + 'a Path object, an array-like[N][2] list of points, or a list of these') # Connect beginning of new points with old points points = _rotate_points(points, angle = self.end_angle - start_angle) points += self.points[-1,:] - points[0,:] # Update end angle self.end_angle = mod(end_angle + self.end_angle - start_angle, 360) # Concatenate old points + new points self.points = np.vstack([self.points, points[1:]]) return self def extrude(self, cross_section, simplify = None): """ Combines the 1D Path with a 1D CrossSection to form 2D polygons. Parameters ---------- cross_section : CrossSection The CrossSection that the Path will extrude along simplify : float Tolerance value for the simplification algorithm. All points that can be removed without changing the resulting polygon by more than the value listed here will be removed. Also known as `epsilon` here https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm Returns ------- Device A Device with polygons added that correspond to the extrusion of the Path with the CrossSection """ X = cross_section D = Device() for section in X.sections: width = section['width'] offset = section['offset'] layer = section['layer'] ports = section['ports'] if callable(offset): P_offset = self.copy() P_offset.offset(offset) points = P_offset.points start_angle = P_offset.start_angle end_angle = P_offset.end_angle offset = 0 else: points = self.points start_angle = self.start_angle end_angle = self.end_angle if callable(width): # Compute lengths dx = np.diff(self.points[:,0]) dy = np.diff(self.points[:,1]) lengths = np.cumsum(np.sqrt((dx)**2 + (dy)**2)) lengths = np.concatenate([[0], lengths]) width = width(lengths / lengths[-1]) else: pass points1 = self._parametric_offset_curve(points, offset_distance = offset + width/2, start_angle = start_angle, end_angle = end_angle) points2 = self._parametric_offset_curve(points, offset_distance = offset - width/2, start_angle = start_angle, end_angle = end_angle) # Simplify lines using the Ramer–Douglas–Peucker algorithm if isinstance(simplify, bool): raise ValueError('[PHIDL] the simplify argument must be a number (e.g. 
1e-3) or None') if simplify is not None: points1 = _simplify(points1, tolerance = simplify) points2 = _simplify(points2, tolerance = simplify) # Join points together points = np.concatenate([points1, points2[::-1,:]]) # Combine the offset-lines into a polygon and union if join_after == True # if join_after == True: # Use clipper to perform a union operation # points = np.array(clipper.offset([points], 0, 'miter', 2, int(1/simplify), 0)[0]) D.add_polygon(points, layer = layer) # Add ports if they were specified if ports[0] is not None: new_port = D.add_port(name = ports[0]) new_port.endpoints = (points1[0], points2[0]) if ports[1] is not None: new_port = D.add_port(name = ports[1]) new_port.endpoints = (points2[-1], points1[-1]) return D def offset(self, offset = 0): """ Offsets the Path so that it follows the Path centerline plus an offset. The offset can either be a fixed value, or a function of the form my_offset(t) where t goes from 0->1 Parameters ---------- offset : int or float, callable Magnitude of the offset """ if offset == 0: points = self.points start_angle = self.start_angle end_angle = self.end_angle elif callable(offset): # Compute lengths dx = np.diff(self.points[:,0]) dy = np.diff(self.points[:,1]) lengths = np.cumsum(np.sqrt((dx)**2 + (dy)**2)) lengths = np.concatenate([[0], lengths]) # Create list of offset points and perform offset points = self._parametric_offset_curve(self.points, offset_distance = offset(lengths / lengths[-1]), start_angle = self.start_angle, end_angle = self.end_angle) # Numerically compute start and end angles tol = 1e-6 ds = tol/lengths[-1] ny1 = offset(ds) - offset(0) start_angle = np.arctan2(-ny1,tol)/np.pi*180 + self.start_angle start_angle = np.round(start_angle, decimals = 6) ny2 = offset(1) - offset(1 - ds) end_angle = np.arctan2(-ny2,tol)/np.pi*180 + self.end_angle end_angle = np.round(end_angle, decimals = 6) else: # Offset is just a number points = self._parametric_offset_curve(self.points, offset_distance = offset, start_angle = self.start_angle, end_angle = self.end_angle) start_angle = self.start_angle end_angle = self.end_angle self.points = points self.start_angle = start_angle self.end_angle = end_angle return self def copy(self): """ Creates a copy of the Path. Returns ------- Path A copy of the Path """ P = Path() P.info = self.info.copy() P.points = np.array(self.points) P.start_angle = self.start_angle P.end_angle = self.end_angle return P def move(self, origin = (0,0), destination = None, axis = None): """ Moves the Path from the origin point to the destination. Both origin and destination can be 1x2 array-like or a Port. Parameters ---------- origin : array-like[2], Port Origin point of the move. destination : array-like[2], Port Destination point of the move. axis : {'x', 'y'} Direction of move. """ dx,dy = _parse_move(origin, destination, axis) self.points += np.array([dx,dy]) return self def rotate(self, angle = 45, center = (0,0)): """ Rotates all Polygons in the Device around the specified center point. If no center point specified will rotate around (0,0). Parameters ---------- angle : int or float Angle to rotate the Device in degrees. center : array-like[2] or None Midpoint of the Device. 
""" if angle == 0: return self self.points = _rotate_points(self.points, angle, center) if self.start_angle is not None: self.start_angle = mod(self.start_angle + angle, 360) if self.end_angle is not None: self.end_angle = mod(self.end_angle + angle, 360) return self def mirror(self, p1 = (0,1), p2 = (0,0)): """ Mirrors the Path across the line formed between the two specified points. ``points`` may be input as either single points [1,2] or array-like[N][2], and will return in kind. Parameters ---------- p1 : array-like[N][2] First point of the line. p2 : array-like[N][2] Second point of the line. """ self.points = _reflect_points(self.points, p1, p2) angle = np.arctan2((p2[1]-p1[1]), (p2[0]-p1[0])) * 180/pi if self.start_angle is not None: self.start_angle = mod(2*angle - self.start_angle, 360) if self.end_angle is not None: self.end_angle = mod(2*angle - self.end_angle, 360) return self def _parametric_offset_curve(self, points, offset_distance, start_angle, end_angle): """ Creates a parametric offset (does not account for cusps etc) by using gradient of the supplied x and y points """ x = points[:,0] y = points[:,1] dxdt = np.gradient(x) dydt = np.gradient(y) if start_angle is not None: dxdt[0] = np.cos(start_angle*np.pi/180) dydt[0] = np.sin(start_angle*np.pi/180) if end_angle is not None: dxdt[-1] = np.cos(end_angle*np.pi/180) dydt[-1] = np.sin(end_angle*np.pi/180) x_offset = x + offset_distance*dydt/np.sqrt(dxdt**2 + dydt**2) y_offset = y - offset_distance*dxdt/np.sqrt(dydt**2 + dxdt**2) return np.array([x_offset, y_offset]).T def length(self): """ Computes the cumulative length (arc length) of the path. Returns ------- float The length of the Path """ x = self.points[:,0] y = self.points[:,1] dx = np.diff(x) dy = np.diff(y) return np.sum(np.sqrt((dx)**2 + (dy)**2)) def curvature(self): """ Calculates the curvature of the Path. Note this curvature is numerically computed so areas where the curvature jumps instantaneously (such as between an arc and a straight segment) will be slightly interpolated, and sudden changes in point density along the curve can cause discontinuities. Returns ------- s : array-like[N] The arc-length of the Path K : array-like[N] The curvature of the Path """ x = self.points[:,0] y = self.points[:,1] dx = np.diff(x) dy = np.diff(y) ds = np.sqrt((dx)**2 + (dy)**2) s = np.cumsum(ds) theta = np.arctan2(dy,dx) # Fix discontinuities arising from np.arctan2 dtheta = np.diff(theta) dtheta[np.where(dtheta > np.pi)] += -2*np.pi dtheta[np.where(dtheta < -np.pi)] += 2*np.pi theta = np.concatenate([[0], np.cumsum(dtheta)]) + theta[0] K = np.gradient(theta,s, edge_order = 2) return s, K class CrossSection(object): """ The CrossSection object for extruding a Path. To be used in combination with a Path to create a Device. Parameters ---------- path : array-like[N][2], Path, or list of Paths Points or Paths to append() initially """ def __init__(self): self.sections = [] self.ports = set() self.aliases = {} self.info = {} def add(self, width = 1, offset = 0, layer = 0, ports = (None,None), name = None): """ Adds a cross-sectional element to the CrossSection. If ports are specified, when creating a Device with the extrude() command there be have Ports at the ends. 
Parameters ---------- width : float Width of the segment offset : float Offset of the segment (positive values = right hand side) layer : int, tuple of int, or set of int The polygon layer to put the segment on ports : array-like[2] of str, int, or None If not None, specifies the names for the ports at the ends of the cross-sectional element name : str, int, or None Name of the cross-sectional element for later access """ if isinstance(width, (float, int)) and (width <= 0): raise ValueError('[PHIDL] CrossSection.add(): widths must be >0') if len(ports) != 2: raise ValueError('[PHIDL] CrossSection.add(): must receive 2 port names') for p in ports: if p in self.ports: raise ValueError('[PHIDL] CrossSection.add(): a port named "%s" already ' \ "exists in this CrossSection, please rename port" % p) if name in self.aliases: raise ValueError('[PHIDL] CrossSection.add(): an element named "%s" already ' \ "exists in this CrossSection, please change the name" % name) new_segment = dict( width = width, offset = offset, layer = layer, ports = ports, ) if name is not None: self.aliases[name] = new_segment self.sections.append(new_segment) [self.ports.add(p) for p in ports if p is not None] return self def extrude(self, path, simplify = None): """ Combines the 1D CrossSection with a 1D Path to form 2D polygons. Parameters ---------- path : Path The Path for the CrossSection to follow simplify : float Tolerance value for the simplification algorithm. All points that can be removed without changing the resulting polygon by more than the value listed here will be removed. Also known as `epsilon` here https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm Returns ------- Device A Device with polygons added that correspond to the extrusion of the Path with the CrossSection """ D = path.extrude(cross_section = self, simplify = simplify) return D def copy(self): """ Creates a copy of the CrosSection. Returns ------- CrossSection A copy of the CrossSection """ X = CrossSection() X.info = self.info.copy() X.sections = list(self.sections) X.ports = set(self.ports) X.aliases = dict(self.aliases) return X def __getitem__(self, key): """ Allows access to cross-sectional elements by name like X['etch2']. Parameters ---------- key : str Element name to access within the CrossSection. """ try: return self.aliases[key] except: raise ValueError('[PHIDL] Tried to access name "%s" in CrossSection ' 'which does not exist' % (key))
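
# ----------------------------------------------------------------------------
# Minimal usage sketch of the classes above (the point list, width, layer and
# port names are arbitrary illustration values, not part of any real layout):
if __name__ == '__main__':
    P = Path(np.array([(0, 0), (10, 0), (10, 10)]))         # centerline through three points
    X = CrossSection()
    X.add(width=2, offset=0, layer=0, ports=('in', 'out'))
    D = P.extrude(X)                                        # Device holding the extruded polygon
    print(D.ports)                                          # ports 'in' and 'out' at the path ends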
mit
-5,452,652,125,037,011,000
34.806163
202
0.550058
false
andyr0id/PyGFNN
pygfnn/tools/customxml/networkreader.py
1
3456
__author__ = 'Andrew J. Lambert, andy@andyroid.co.uk'

# those imports are necessary for the eval() commands to find the right classes
import pygfnn #@UnusedImport
import pybrain #@UnusedImport
from scipy import array #@UnusedImport

from pybrain.tools.customxml import NetworkReader as PyBrainNetworkReader
from numpy import ndarray, loadtxt, reshape
from io import BytesIO

class NetworkReader(PyBrainNetworkReader):

    @staticmethod
    def readFrom(filename, name = None, index = 0):
        """ read a network from an existing xml file

        :key name: if this parameter is specified, read the network with this name
        :key index: which network in the file shall be read (if there is more than one)
        """
        r = NetworkReader(filename, newfile = False)
        if name:
            netroot = r.findNamedNode('Network', name)
        else:
            netroot = r.findNode('Network', index)

        return r.readNetwork(netroot)

    def readNetwork(self, node):
        # TODO: why is this necessary?
        import pybrain.structure.networks.custom #@Reimport @UnusedImport
        nclass = eval(str(node.getAttribute('class')))
        argdict = self.readArgs(node)
        n = nclass(**argdict)
        n.name = node.getAttribute('name')

        for mnode in self.getChildrenOf(self.getChild(node, 'Modules')):
            m, inmodule, outmodule = self.readModule(mnode)
            if inmodule:
                n.addInputModule(m)
            elif outmodule:
                n.addOutputModule(m)
            else:
                n.addModule(m)

        mconns = self.getChild(node, 'MotherConnections')
        if mconns:
            for mcnode in self.getChildrenOf(mconns):
                m = self.readBuildable(mcnode)
                self.mothers[m.name] = m

        for cnode in self.getChildrenOf(self.getChild(node, 'Connections')):
            c, recurrent = self.readConnection(cnode)
            if recurrent:
                n.addRecurrentConnection(c)
            else:
                n.addConnection(c)

        n.sortModules()
        return n

    def readBuildable(self, node):
        mclass = node.getAttribute('class')
        argdict = self.readArgs(node)
        try:
            m = eval(mclass)(**argdict)
        except:
            print('Could not construct', mclass)
            print('with arguments:', argdict)
            return None
        m.name = node.getAttribute('name')
        self.readParams(node, m)
        return m

    def readArgs(self, node):
        res = {}
        for c in self.getChildrenOf(node):
            val = c.getAttribute('val')
            if val in self.modules:
                res[str(c.nodeName)] = self.modules[val]
            elif val in self.mothers:
                res[str(c.nodeName)] = self.mothers[val]
            elif val == 'ndarray':
                valDtype = c.getAttribute('dtype')
                valShape = [int(x) for x in c.getAttribute('shape').split('x')]
                val = loadtxt(BytesIO(str(c.firstChild.data))).view(valDtype)
                res[str(c.nodeName)] = val.reshape(valShape)
            elif val != '':
                res[str(c.nodeName)] = eval(val)
        return res

    def readParams(self, node, m):
        import string
        pnode = self.getChild(node, 'Parameters')
        if pnode:
            params = eval(string.strip(pnode.firstChild.data))
            m._setParameters(params)
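
# Minimal usage sketch ('gfnn-net.xml' is a placeholder file name; it must be an
# XML file previously written by the matching network writer):
if __name__ == '__main__':
    net = NetworkReader.readFrom('gfnn-net.xml')   # reads the first <Network> element
    print(net.name)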
gpl-2.0
7,732,179,394,367,844,000
35.010417
87
0.582755
false
naitoh/py2rb
tests/numpy/arg_max_min.py
1
1034
# coding: utf-8
import numpy as np

def print_matrix(data):
    data_i = []
    for i in list(data):
        data_j = []
        for j in i:
            data_j.append(int("%d" % j))
        data_i.append(data_j)
    print(data_i)

def print_array(data):
    datas = []
    for i in data:
        datas.append(int("%d" % i))
    print(datas)

a = np.asarray([[1,2,3],[4,5,6]])
print_matrix(a)

x = np.argmax(a)
print(x)

"""
<Not Compatible with axis>
Python argmax : [1, 1, 1]
Ruby max_index : [3, 4, 5] (flat Array index)
"""
x = np.argmax(a, axis=0)
print_array(x)

"""
<Not Compatible with axis>
Python argmax : [2, 2]
Ruby max_index : [2, 5] (flat Array index)
"""
x = np.argmax(a, axis=1)
print_array(x)

x = np.argmin(a)
print(x)

"""
<Not Compatible with axis>
Python argmin : [0, 0, 0]
Ruby min_index : [0, 1, 2] (flat Array index)
"""
x = np.argmin(a, axis=0)
print_array(x)

"""
<Not Compatible with axis>
Python argmin : [0, 0]
Ruby min_index : [0, 3] (flat Array index)
"""
x = np.argmin(a, axis=1)
print_array(x)
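
# Note on the incompatibility above: NumPy's flat argmax/argmin index can be
# mapped back to (row, column) coordinates with np.unravel_index. With
# a = [[1,2,3],[4,5,6]]:
#
#   np.unravel_index(np.argmax(a), a.shape)  # -> (1, 2), the position of 6
#   np.unravel_index(np.argmin(a), a.shape)  # -> (0, 0), the position of 1
#
# Kept as a comment so the printed output of this test script stays unchanged.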
mit
-577,519,652,922,216,600
16.525424
45
0.574468
false
Ichaelus/Github-Classifier
Playground/utilities.py
1
8308
# -*- coding: utf-8 -*-

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import os
#import nltk
import base64
import random
import json
from urllib2 import Request, urlopen, URLError
import re
from nltk.stem import PorterStemmer
import string
import sklearn

# Constants
max_stars = 1000 # Max found in data was 52762
max_forks = 100 # Max found in data was 9287
max_watches = 10 # Max found in data was 3709
max_folder_count = 100 # To be improved
max_treeDepth = 100
max_branch_count = 10
max_forks = 100
max_commit_interval_avg = 10
max_contributors_count = 100
max_open_issues_count = 10
max_avg_commit_length = 100
max_file_count = 100
max_commit_interval_max = 10

stemmer = PorterStemmer()

def one_hot_encoding(labels):
    # Get labels of type [1, 0, 2, ...] and convert to array
    # of type [[0, 1, 0, ..], [1, 0, 0, ..], [0, 0, 1, ...]]
    arr = np.zeros((len(labels), max(labels) + 1), np.uint8)
    arr[np.arange(len(labels)), labels] = 1
    return arr

def get_unlabeled_data(whatIWant='description'):
    # By default ONLY the description is used, not the readme
    # fetch the data as a dict
    data = api_call(url="Gimme unlabeld pls")
    features = []

    for i in xrange(len(data)):
        feature = None
        if whatIWant == 'readme':
            # apparently only the readme is base64-encoded
            feature = text_from_base64(data[i][whatIWant]).decode('utf-8')
        elif whatIWant == 'meta':
            """
                Available metadata:
                description, author, url, tree, watches, class, languages,
                tagger, stars, readme, forks, id, name
            """
            feature = []
            sample = data[i]
            feature.append(float(sample['watches']) / max_watches)
            feature.append(float(sample['stars']) / max_stars)
            feature.append(float(sample['forks']) / max_forks)
            features.append(feature)
        else:
            feature = data[i][whatIWant]
        if whatIWant != 'meta':
            feature = process_text(feature)
            features.append(feature)
    return features

def text_from_base64(text):
    missing_padding = len(text) % 4
    if missing_padding != 0:
        text += b'='* (4 - missing_padding)
    # decode, falling back to None if the payload is malformed
    decoded = None
    try:
        decoded = base64.b64decode(text)
    except TypeError:
        print "Error decoding readme"
    return decoded

def process_text(text):
    # Process string
    readme_codefree = ""
    words = ""
    final_words = ""
    if text is not None:
        for no_code in text.split("```")[::2]: # skip content code in e.g. blalba```code```blabla
            readme_codefree += no_code
        # Remove urls
        readme_urlfree = re.sub(r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)', ' ', readme_codefree)
        for word in ((char if char.isalpha() else " ") for char in readme_urlfree):
            try:
                words += word.decode('ascii')
            except UnicodeEncodeError:
                continue
        words = " ".join(words.split())
        for word in words.split():
            final_words += stemmer.stem(word) + " "
    return words

def shuffle_data(a, b):
    return sklearn.utils.shuffle(a, b)

def get_data(whatIWant='description', binary = False, equal=False, no_dev=False, old_data=False):
    # By default ONLY the description is used, not the readme
    # fetch the data as a dict
    data = api_call(equal=equal, old=old_data)
    # list of strings holding the feature texts
    features = []
    # the names of the classes
    label_names = []
    # the classes
    labels = []
    # the vectorizer needs a list of strings, so the data is converted here
    for i in xrange(len(data)):
        feature = None
        if whatIWant.lower() == 'readme':
            # apparently only the readme is base64-encoded
            try:
                feature = base64.b64decode(data[i][whatIWant])
            except TypeError:
                continue
            feature = feature.decode('utf-8')
        elif whatIWant.lower() == 'meta':
            feature = []
            sample = data[i]
            feature.append(float(sample['hasDownloads']))
            feature.append(float(sample['watches']) / max_watches)
            feature.append(float(sample['folder_count']) / max_folder_count)
            feature.append(float(sample['treeDepth']) / max_treeDepth)
            feature.append(float(sample['stars']) / max_stars)
            feature.append(float(sample['branch_count']) / max_branch_count)
            feature.append(float(sample['forks']) / max_forks)
            feature.append(float(sample['commit_interval_avg']) / max_commit_interval_avg)
            feature.append(float(sample['contributors_count']) / max_contributors_count)
            feature.append(float(sample['open_issues_count']) / max_open_issues_count)
            feature.append(float(sample['avg_commit_length']) / max_avg_commit_length)
            feature.append(float(sample['hasWiki']))
            feature.append(float(sample['file_count']) / max_file_count)
            feature.append(float(sample['commit_interval_max']) / max_commit_interval_max)
            feature.append(float(sample['isFork']))
        else:
            feature = data[i][whatIWant]
        if whatIWant.lower() != 'meta':
            feature = process_text(feature)
        if binary:
            if data[i]['class'] == 'DEV':
                label = 'DEV'
            else:
                label = 'NOTDEV'
        else:
            label = data[i]['class']
        if not no_dev or label != 'DEV':
            if label not in label_names:
                label_names.append(label)
            features.append(feature)
            labels.append(label_names.index(label))
        #if i % 50 == 0:
        #    print "{} repos processed".format(i)
    return (features, labels, label_names)

def get_batch(features, labels, nb_batch):
    x, y = shuffle_data(features, labels)
    return (x[:nb_batch], y[:nb_batch])

def api_call(equal=False, old=False):
    filter = base64.b64encode(b'id>0')
    url = None
    if equal:
        url = 'http://classifier.leimstaedtner.it/ajax.php?key=api:equal&filter='+filter.decode("utf-8")
    elif old:
        url = 'http://classifier.leimstaedtner.it/ajax.php?key=api:old&filter='+filter.decode("utf-8")
    else:
        url = 'http://classifier.leimstaedtner.it/ajax.php?key=api:all&filter='+filter.decode("utf-8")
    request = Request(url)
    try:
        response = urlopen(request)
        data = json.load(response)
    except URLError, e:
        print 'Error with api call', e
    return data

# Takes the first ratio * 100% of the elements for training, the rest for testing
def split_train_test(features, labels, ratio=0.7, shuffle=False):
    cut = int(ratio * len(labels))
    features_train, labels_train = features[:cut], labels[:cut]
    features_test, labels_test = features[cut:], labels[cut:]
    # Shuffle data for better training results
    if shuffle:
        features_train, labels_train = shuffle_data(features_train, labels_train)
    return (features_train, features_test, labels_train, labels_test)

# converts the texts into a tf-idf matrix; stop_words filters out unimportant words
# https://de.wikipedia.org/wiki/Tf-idf-Maß
# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
def vectorize_text(features, max_features=2000):
    vectorizer = TfidfVectorizer(sublinear_tf=True,
                                 stop_words='english',
                                 decode_error='strict',
                                 analyzer='word',
                                 ngram_range=(1, 2),
                                 max_features=max_features
                                 #max_df=0.5 # Used in the ML course under "Preprocessing"
                                 )
    feature_vec = vectorizer.fit_transform(features)
    return feature_vec.toarray(), vectorizer

# not used at the moment, but may be helpful later
#def get_synonyms(str):
    #synonyms = []
    #for syn in wn.synsets(str):
        #for x in syn.lemmas():
            #synonyms.append(x.name())
    #return synonyms
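
# Minimal end-to-end sketch of the helpers above (assumes the labeling API is
# reachable; the ratio and feature count are arbitrary illustration values):
if __name__ == '__main__':
    features, labels, label_names = get_data(whatIWant='description')
    vectors, vectorizer = vectorize_text(features, max_features=2000)
    x_train, x_test, y_train, y_test = split_train_test(vectors, labels, ratio=0.7, shuffle=True)
    print len(x_train), len(x_test), label_names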
mit
3,352,495,437,871,071,000
35.568282
149
0.594145
false
ContextLab/quail
quail/analysis/accuracy.py
1
1660
from __future__ import division
import numpy as np
from .recmat import recall_matrix

def accuracy_helper(egg, match='exact', distance='euclidean',
                    features=None):
    """
    Computes proportion of words recalled

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze

    match : str (exact, best or smooth)
        Matching approach to compute recall matrix.  If exact, the presented
        and recalled items must be identical (default).  If best, the recalled
        item that is most similar to the presented items will be selected. If
        smooth, a weighted average of all presented items will be used, where
        the weights are derived from the similarity between the recalled item
        and each presented item.

    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches.  Can be any
        distance function supported by scipy.spatial.distance.cdist.

    Returns
    ----------
    prop_recalled : numpy array
      proportion of words recalled

    """

    def acc(lst):
        return len([i for i in np.unique(lst) if i>=0])/(egg.list_length)

    opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
        opts.update({'features' : 'item'})
    recmat = recall_matrix(egg, **opts)

    if match in ['exact', 'best']:
        result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.mean(recmat, axis=1)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')

    return np.nanmean(result, axis=0)
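
# Illustration of the reduction above, using a made-up 2-list recall matrix
# (not real quail output) and a list_length of 3:
#
#   fake_recmat = np.array([[0, 1, -1], [2, -1, -1]])
#   per_list = [len([i for i in np.unique(lst) if i >= 0]) / 3 for lst in fake_recmat]
#   np.nanmean(per_list, axis=0)   # -> 0.5, i.e. half of the items were recalled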
mit
4,599,930,361,724,805,600
32.2
80
0.645783
false
joseph-torres/spark
python/pyspark/sql/tests.py
1
231543
# -*- encoding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Unit tests for pyspark.sql; additional tests are implemented as doctests in individual modules. """ import os import sys import subprocess import pydoc import shutil import tempfile import pickle import functools import time import datetime import array import ctypes import warnings import py4j from contextlib import contextmanager try: import xmlrunner except ImportError: xmlrunner = None if sys.version_info[:2] <= (2, 6): try: import unittest2 as unittest except ImportError: sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier') sys.exit(1) else: import unittest from pyspark.util import _exception_message _pandas_requirement_message = None try: from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() except ImportError as e: # If Pandas version requirement is not satisfied, skip related tests. _pandas_requirement_message = _exception_message(e) _pyarrow_requirement_message = None try: from pyspark.sql.utils import require_minimum_pyarrow_version require_minimum_pyarrow_version() except ImportError as e: # If Arrow version requirement is not satisfied, skip related tests. _pyarrow_requirement_message = _exception_message(e) _have_pandas = _pandas_requirement_message is None _have_pyarrow = _pyarrow_requirement_message is None from pyspark import SparkContext from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row from pyspark.sql.types import * from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings from pyspark.sql.types import _merge_type from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests from pyspark.sql.functions import UserDefinedFunction, sha2, lit from pyspark.sql.window import Window from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException class UTCOffsetTimezone(datetime.tzinfo): """ Specifies timezone in UTC offset """ def __init__(self, offset=0): self.ZERO = datetime.timedelta(hours=offset) def utcoffset(self, dt): return self.ZERO def dst(self, dt): return self.ZERO class ExamplePointUDT(UserDefinedType): """ User-defined type (UDT) for ExamplePoint. 
""" @classmethod def sqlType(self): return ArrayType(DoubleType(), False) @classmethod def module(cls): return 'pyspark.sql.tests' @classmethod def scalaUDT(cls): return 'org.apache.spark.sql.test.ExamplePointUDT' def serialize(self, obj): return [obj.x, obj.y] def deserialize(self, datum): return ExamplePoint(datum[0], datum[1]) class ExamplePoint: """ An example class to demonstrate UDT in Scala, Java, and Python. """ __UDT__ = ExamplePointUDT() def __init__(self, x, y): self.x = x self.y = y def __repr__(self): return "ExamplePoint(%s,%s)" % (self.x, self.y) def __str__(self): return "(%s,%s)" % (self.x, self.y) def __eq__(self, other): return isinstance(other, self.__class__) and \ other.x == self.x and other.y == self.y class PythonOnlyUDT(UserDefinedType): """ User-defined type (UDT) for ExamplePoint. """ @classmethod def sqlType(self): return ArrayType(DoubleType(), False) @classmethod def module(cls): return '__main__' def serialize(self, obj): return [obj.x, obj.y] def deserialize(self, datum): return PythonOnlyPoint(datum[0], datum[1]) @staticmethod def foo(): pass @property def props(self): return {} class PythonOnlyPoint(ExamplePoint): """ An example class to demonstrate UDT in only Python """ __UDT__ = PythonOnlyUDT() class MyObject(object): def __init__(self, key, value): self.key = key self.value = value class SQLTestUtils(object): """ This util assumes the instance of this to have 'spark' attribute, having a spark session. It is usually used with 'ReusedSQLTestCase' class but can be used if you feel sure the the implementation of this class has 'spark' attribute. """ @contextmanager def sql_conf(self, pairs): """ A convenient context manager to test some configuration specific logic. This sets `value` to the configuration `key` and then restores it back when it exits. """ assert isinstance(pairs, dict), "pairs should be a dictionary." assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." 
keys = pairs.keys() new_values = pairs.values() old_values = [self.spark.conf.get(key, None) for key in keys] for key, new_value in zip(keys, new_values): self.spark.conf.set(key, new_value) try: yield finally: for key, old_value in zip(keys, old_values): if old_value is None: self.spark.conf.unset(key) else: self.spark.conf.set(key, old_value) class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils): @classmethod def setUpClass(cls): ReusedPySparkTestCase.setUpClass() cls.spark = SparkSession(cls.sc) @classmethod def tearDownClass(cls): ReusedPySparkTestCase.tearDownClass() cls.spark.stop() def assertPandasEqual(self, expected, result): msg = ("DataFrames are not equal: " + "\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) + "\n\nResult:\n%s\n%s" % (result, result.dtypes)) self.assertTrue(expected.equals(result), msg=msg) class DataTypeTests(unittest.TestCase): # regression test for SPARK-6055 def test_data_type_eq(self): lt = LongType() lt2 = pickle.loads(pickle.dumps(LongType())) self.assertEqual(lt, lt2) # regression test for SPARK-7978 def test_decimal_type(self): t1 = DecimalType() t2 = DecimalType(10, 2) self.assertTrue(t2 is not t1) self.assertNotEqual(t1, t2) t3 = DecimalType(8) self.assertNotEqual(t2, t3) # regression test for SPARK-10392 def test_datetype_equal_zero(self): dt = DateType() self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1)) # regression test for SPARK-17035 def test_timestamp_microsecond(self): tst = TimestampType() self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999) def test_empty_row(self): row = Row() self.assertEqual(len(row), 0) def test_struct_field_type_name(self): struct_field = StructField("a", IntegerType()) self.assertRaises(TypeError, struct_field.typeName) class SQLTests(ReusedSQLTestCase): @classmethod def setUpClass(cls): ReusedSQLTestCase.setUpClass() cls.tempdir = tempfile.NamedTemporaryFile(delete=False) os.unlink(cls.tempdir.name) cls.testData = [Row(key=i, value=str(i)) for i in range(100)] cls.df = cls.spark.createDataFrame(cls.testData) @classmethod def tearDownClass(cls): ReusedSQLTestCase.tearDownClass() shutil.rmtree(cls.tempdir.name, ignore_errors=True) def test_sqlcontext_reuses_sparksession(self): sqlContext1 = SQLContext(self.sc) sqlContext2 = SQLContext(self.sc) self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession) def tearDown(self): super(SQLTests, self).tearDown() # tear down test_bucketed_write state self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket") def test_row_should_be_read_only(self): row = Row(a=1, b=2) self.assertEqual(1, row.a) def foo(): row.a = 3 self.assertRaises(Exception, foo) row2 = self.spark.range(10).first() self.assertEqual(0, row2.id) def foo2(): row2.id = 2 self.assertRaises(Exception, foo2) def test_range(self): self.assertEqual(self.spark.range(1, 1).count(), 0) self.assertEqual(self.spark.range(1, 0, -1).count(), 1) self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2) self.assertEqual(self.spark.range(-2).count(), 0) self.assertEqual(self.spark.range(3).count(), 3) def test_duplicated_column_names(self): df = self.spark.createDataFrame([(1, 2)], ["c", "c"]) row = df.select('*').first() self.assertEqual(1, row[0]) self.assertEqual(2, row[1]) self.assertEqual("Row(c=1, c=2)", str(row)) # Cannot access columns self.assertRaises(AnalysisException, lambda: df.select(df[0]).first()) self.assertRaises(AnalysisException, lambda: df.select(df.c).first()) self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first()) def 
test_column_name_encoding(self): """Ensure that created columns has `str` type consistently.""" columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns self.assertEqual(columns, ['name', 'age']) self.assertTrue(isinstance(columns[0], str)) self.assertTrue(isinstance(columns[1], str)) def test_explode(self): from pyspark.sql.functions import explode, explode_outer, posexplode_outer d = [ Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}), Row(a=1, intlist=[], mapfield={}), Row(a=1, intlist=None, mapfield=None), ] rdd = self.sc.parallelize(d) data = self.spark.createDataFrame(rdd) result = data.select(explode(data.intlist).alias("a")).select("a").collect() self.assertEqual(result[0][0], 1) self.assertEqual(result[1][0], 2) self.assertEqual(result[2][0], 3) result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect() self.assertEqual(result[0][0], "a") self.assertEqual(result[0][1], "b") result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()] self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)]) result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()] self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)]) result = [x[0] for x in data.select(explode_outer("intlist")).collect()] self.assertEqual(result, [1, 2, 3, None, None]) result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()] self.assertEqual(result, [('a', 'b'), (None, None), (None, None)]) def test_and_in_expression(self): self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count()) self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2")) self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count()) self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2") self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count()) self.assertRaises(ValueError, lambda: not self.df.key == 1) def test_udf_with_callable(self): d = [Row(number=i, squared=i**2) for i in range(10)] rdd = self.sc.parallelize(d) data = self.spark.createDataFrame(rdd) class PlusFour: def __call__(self, col): if col is not None: return col + 4 call = PlusFour() pudf = UserDefinedFunction(call, LongType()) res = data.select(pudf(data['number']).alias('plus_four')) self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85) def test_udf_with_partial_function(self): d = [Row(number=i, squared=i**2) for i in range(10)] rdd = self.sc.parallelize(d) data = self.spark.createDataFrame(rdd) def some_func(col, param): if col is not None: return col + param pfunc = functools.partial(some_func, param=4) pudf = UserDefinedFunction(pfunc, LongType()) res = data.select(pudf(data['number']).alias('plus_four')) self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85) def test_udf(self): self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType()) [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect() self.assertEqual(row[0], 5) # This is to check if a deprecated 'SQLContext.registerFunction' can call its alias. 
sqlContext = self.spark._wrapped sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType()) [row] = sqlContext.sql("SELECT oneArg('test')").collect() self.assertEqual(row[0], 4) def test_udf2(self): self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType()) self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\ .createOrReplaceTempView("test") [res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect() self.assertEqual(4, res[0]) def test_udf3(self): two_args = self.spark.catalog.registerFunction( "twoArgs", UserDefinedFunction(lambda x, y: len(x) + y)) self.assertEqual(two_args.deterministic, True) [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect() self.assertEqual(row[0], u'5') def test_udf_registration_return_type_none(self): two_args = self.spark.catalog.registerFunction( "twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None) self.assertEqual(two_args.deterministic, True) [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect() self.assertEqual(row[0], 5) def test_udf_registration_return_type_not_none(self): with QuietTest(self.sc): with self.assertRaisesRegexp(TypeError, "Invalid returnType"): self.spark.catalog.registerFunction( "f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType()) def test_nondeterministic_udf(self): # Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations from pyspark.sql.functions import udf import random udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic() self.assertEqual(udf_random_col.deterministic, False) df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND')) udf_add_ten = udf(lambda rand: rand + 10, IntegerType()) [row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect() self.assertEqual(row[0] + 10, row[1]) def test_nondeterministic_udf2(self): import random from pyspark.sql.functions import udf random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic() self.assertEqual(random_udf.deterministic, False) random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf) self.assertEqual(random_udf1.deterministic, False) [row] = self.spark.sql("SELECT randInt()").collect() self.assertEqual(row[0], 6) [row] = self.spark.range(1).select(random_udf1()).collect() self.assertEqual(row[0], 6) [row] = self.spark.range(1).select(random_udf()).collect() self.assertEqual(row[0], 6) # render_doc() reproduces the help() exception without printing output pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType())) pydoc.render_doc(random_udf) pydoc.render_doc(random_udf1) pydoc.render_doc(udf(lambda x: x).asNondeterministic) def test_nondeterministic_udf3(self): # regression test for SPARK-23233 from pyspark.sql.functions import udf f = udf(lambda x: x) # Here we cache the JVM UDF instance. self.spark.range(1).select(f("id")) # This should reset the cache to set the deterministic status correctly. f = f.asNondeterministic() # Check the deterministic status of udf. 
        df = self.spark.range(1).select(f("id"))
        deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
        self.assertFalse(deterministic)

    def test_nondeterministic_udf_in_aggregate(self):
        from pyspark.sql.functions import udf, sum
        import random
        udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
        df = self.spark.range(10)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
                df.groupby('id').agg(sum(udf_random_col())).collect()
            with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
                df.agg(sum(udf_random_col())).collect()

    def test_chained_udf(self):
        self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
        [row] = self.spark.sql("SELECT double(1)").collect()
        self.assertEqual(row[0], 2)
        [row] = self.spark.sql("SELECT double(double(1))").collect()
        self.assertEqual(row[0], 4)
        [row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
        self.assertEqual(row[0], 6)

    def test_single_udf_with_repeated_argument(self):
        # regression test for SPARK-20685
        self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
        row = self.spark.sql("SELECT add(1, 1)").first()
        self.assertEqual(tuple(row), (2, ))

    def test_multiple_udfs(self):
        self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
        [row] = self.spark.sql("SELECT double(1), double(2)").collect()
        self.assertEqual(tuple(row), (2, 4))
        [row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
        self.assertEqual(tuple(row), (4, 12))
        self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
        [row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
        self.assertEqual(tuple(row), (6, 5))

    def test_udf_in_filter_on_top_of_outer_join(self):
        from pyspark.sql.functions import udf
        left = self.spark.createDataFrame([Row(a=1)])
        right = self.spark.createDataFrame([Row(a=1)])
        df = left.join(right, on='a', how='left_outer')
        df = df.withColumn('b', udf(lambda x: 'x')(df.a))
        self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])

    def test_udf_in_filter_on_top_of_join(self):
        # regression test for SPARK-18589
        from pyspark.sql.functions import udf
        left = self.spark.createDataFrame([Row(a=1)])
        right = self.spark.createDataFrame([Row(b=1)])
        f = udf(lambda a, b: a == b, BooleanType())
        df = left.crossJoin(right).filter(f("a", "b"))
        self.assertEqual(df.collect(), [Row(a=1, b=1)])

    def test_udf_without_arguments(self):
        self.spark.catalog.registerFunction("foo", lambda: "bar")
        [row] = self.spark.sql("SELECT foo()").collect()
        self.assertEqual(row[0], "bar")

    def test_udf_with_array_type(self):
        d = [Row(l=list(range(3)), d={"key": list(range(5))})]
        rdd = self.sc.parallelize(d)
        self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
        self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
        self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
        [(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
        self.assertEqual(list(range(3)), l1)
        self.assertEqual(1, l2)

    def test_broadcast_in_udf(self):
        bar = {"a": "aa", "b": "bb", "c": "abc"}
        foo = self.sc.broadcast(bar)
        self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
        [res] = self.spark.sql("SELECT MYUDF('c')").collect()
        self.assertEqual("abc", res[0])
        [res] = self.spark.sql("SELECT MYUDF('')").collect()
        self.assertEqual("", res[0])

    def test_udf_with_filter_function(self):
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        from pyspark.sql.functions import udf, col
        from pyspark.sql.types import BooleanType

        my_filter = udf(lambda a: a < 2, BooleanType())
        sel = df.select(col("key"), col("value")).filter(
            (my_filter(col("key"))) & (df.value < "2"))
        self.assertEqual(sel.collect(), [Row(key=1, value='1')])

    def test_udf_with_aggregate_function(self):
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        from pyspark.sql.functions import udf, col, sum
        from pyspark.sql.types import BooleanType

        my_filter = udf(lambda a: a == 1, BooleanType())
        sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
        self.assertEqual(sel.collect(), [Row(key=1)])

        my_copy = udf(lambda x: x, IntegerType())
        my_add = udf(lambda a, b: int(a + b), IntegerType())
        my_strlen = udf(lambda x: len(x), IntegerType())
        sel = df.groupBy(my_copy(col("key")).alias("k"))\
            .agg(sum(my_strlen(col("value"))).alias("s"))\
            .select(my_add(col("k"), col("s")).alias("t"))
        self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])

    def test_udf_in_generate(self):
        from pyspark.sql.functions import udf, explode
        df = self.spark.range(5)
        f = udf(lambda x: list(range(x)), ArrayType(LongType()))
        row = df.select(explode(f(*df))).groupBy().sum().first()
        self.assertEqual(row[0], 10)

        df = self.spark.range(3)
        res = df.select("id", explode(f(df.id))).collect()
        self.assertEqual(res[0][0], 1)
        self.assertEqual(res[0][1], 0)
        self.assertEqual(res[1][0], 2)
        self.assertEqual(res[1][1], 0)
        self.assertEqual(res[2][0], 2)
        self.assertEqual(res[2][1], 1)

        range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
        res = df.select("id", explode(range_udf(df.id))).collect()
        self.assertEqual(res[0][0], 0)
        self.assertEqual(res[0][1], -1)
        self.assertEqual(res[1][0], 0)
        self.assertEqual(res[1][1], 0)
        self.assertEqual(res[2][0], 1)
        self.assertEqual(res[2][1], 0)
        self.assertEqual(res[3][0], 1)
        self.assertEqual(res[3][1], 1)

    def test_udf_with_order_by_and_limit(self):
        from pyspark.sql.functions import udf
        my_copy = udf(lambda x: x, IntegerType())
        df = self.spark.range(10).orderBy("id")
        res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
        res.explain(True)
        self.assertEqual(res.collect(), [Row(id=0, copy=0)])

    def test_udf_registration_returns_udf(self):
        df = self.spark.range(10)
        add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())

        self.assertListEqual(
            df.selectExpr("add_three(id) AS plus_three").collect(),
            df.select(add_three("id").alias("plus_three")).collect()
        )

        # This is to check if a 'SQLContext.udf' can call its alias.
        sqlContext = self.spark._wrapped
        add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())

        self.assertListEqual(
            df.selectExpr("add_four(id) AS plus_four").collect(),
            df.select(add_four("id").alias("plus_four")).collect()
        )

    def test_non_existed_udf(self):
        spark = self.spark
        self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
                                lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))

        # This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
        sqlContext = spark._wrapped
        self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
                                lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))

    def test_non_existed_udaf(self):
        spark = self.spark
        self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
                                lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))

    def test_linesep_text(self):
        df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
        expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
                    Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
                    Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
                    Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
        self.assertEqual(df.collect(), expected)

        tpath = tempfile.mkdtemp()
        shutil.rmtree(tpath)
        try:
            df.write.text(tpath, lineSep="!")
            expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
                        Row(value=u'Tom!30!"My name is Tom"'),
                        Row(value=u'Hyukjin!25!"I am Hyukjin'), Row(value=u''),
                        Row(value=u'I love Spark!"'), Row(value=u'!')]
            readback = self.spark.read.text(tpath)
            self.assertEqual(readback.collect(), expected)
        finally:
            shutil.rmtree(tpath)

    def test_multiline_json(self):
        people1 = self.spark.read.json("python/test_support/sql/people.json")
        people_array = self.spark.read.json("python/test_support/sql/people_array.json",
                                            multiLine=True)
        self.assertEqual(people1.collect(), people_array.collect())

    def test_encoding_json(self):
        people_array = self.spark.read\
            .json("python/test_support/sql/people_array_utf16le.json",
                  multiLine=True, encoding="UTF-16LE")
        expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
        self.assertEqual(people_array.collect(), expected)

    def test_linesep_json(self):
        df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
        expected = [Row(_corrupt_record=None, name=u'Michael'),
                    Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
                    Row(_corrupt_record=u' "age":19}\n', name=None)]
        self.assertEqual(df.collect(), expected)

        tpath = tempfile.mkdtemp()
        shutil.rmtree(tpath)
        try:
            df = self.spark.read.json("python/test_support/sql/people.json")
            df.write.json(tpath, lineSep="!!")
            readback = self.spark.read.json(tpath, lineSep="!!")
            self.assertEqual(readback.collect(), df.collect())
        finally:
            shutil.rmtree(tpath)

    def test_multiline_csv(self):
        ages_newlines = self.spark.read.csv(
            "python/test_support/sql/ages_newlines.csv", multiLine=True)
        expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
                    Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
                    Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
        self.assertEqual(ages_newlines.collect(), expected)

    def test_ignorewhitespace_csv(self):
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
            tmpPath,
            ignoreLeadingWhiteSpace=False,
            ignoreTrailingWhiteSpace=False)
        expected = [Row(value=u' a,b , c ')]
        readback = self.spark.read.text(tmpPath)
        self.assertEqual(readback.collect(), expected)
        shutil.rmtree(tmpPath)

    def test_read_multiple_orc_file(self):
        df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
                                  "python/test_support/sql/orc_partitioned/b=1/c=1"])
        self.assertEqual(2, df.count())

    def test_udf_with_input_file_name(self):
        from pyspark.sql.functions import udf, input_file_name
        sourceFile = udf(lambda path: path, StringType())
        filePath = "python/test_support/sql/people1.json"
        row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
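        # input_file_name() exposes the path of the file that produced each row;
        # routing it through an identity UDF checks that the value survives the
        # Python worker round trip. A stand-alone sketch of the same idea
        # (illustrative only, not executed here):
        #   spark.read.json(filePath).select(input_file_name().alias('src')).show()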
self.assertTrue(row[0].find("people1.json") != -1) def test_udf_with_input_file_name_for_hadooprdd(self): from pyspark.sql.functions import udf, input_file_name def filename(path): return path sameText = udf(filename, StringType()) rdd = self.sc.textFile('python/test_support/sql/people.json') df = self.spark.read.json(rdd).select(input_file_name().alias('file')) row = df.select(sameText(df['file'])).first() self.assertTrue(row[0].find("people.json") != -1) rdd2 = self.sc.newAPIHadoopFile( 'python/test_support/sql/people.json', 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat', 'org.apache.hadoop.io.LongWritable', 'org.apache.hadoop.io.Text') df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file')) row2 = df2.select(sameText(df2['file'])).first() self.assertTrue(row2[0].find("people.json") != -1) def test_udf_defers_judf_initalization(self): # This is separate of UDFInitializationTests # to avoid context initialization # when udf is called from pyspark.sql.functions import UserDefinedFunction f = UserDefinedFunction(lambda x: x, StringType()) self.assertIsNone( f._judf_placeholder, "judf should not be initialized before the first call." ) self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.") self.assertIsNotNone( f._judf_placeholder, "judf should be initialized after UDF has been called." ) def test_udf_with_string_return_type(self): from pyspark.sql.functions import UserDefinedFunction add_one = UserDefinedFunction(lambda x: x + 1, "integer") make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>") make_array = UserDefinedFunction( lambda x: [float(x) for x in range(x, x + 3)], "array<double>") expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0]) actual = (self.spark.range(1, 2).toDF("x") .select(add_one("x"), make_pair("x"), make_array("x")) .first()) self.assertTupleEqual(expected, actual) def test_udf_shouldnt_accept_noncallable_object(self): from pyspark.sql.functions import UserDefinedFunction non_callable = None self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType()) def test_udf_with_decorator(self): from pyspark.sql.functions import lit, udf from pyspark.sql.types import IntegerType, DoubleType @udf(IntegerType()) def add_one(x): if x is not None: return x + 1 @udf(returnType=DoubleType()) def add_two(x): if x is not None: return float(x + 2) @udf def to_upper(x): if x is not None: return x.upper() @udf() def to_lower(x): if x is not None: return x.lower() @udf def substr(x, start, end): if x is not None: return x[start:end] @udf("long") def trunc(x): return int(x) @udf(returnType="double") def as_double(x): return float(x) df = ( self.spark .createDataFrame( [(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float")) .select( add_one("one"), add_two("one"), to_upper("Foo"), to_lower("Foo"), substr("foobar", lit(0), lit(3)), trunc("float"), as_double("one"))) self.assertListEqual( [tpe for _, tpe in df.dtypes], ["int", "double", "string", "string", "string", "bigint", "double"] ) self.assertListEqual( list(df.first()), [2, 3.0, "FOO", "foo", "foo", 3, 1.0] ) def test_udf_wrapper(self): from pyspark.sql.functions import udf from pyspark.sql.types import IntegerType def f(x): """Identity""" return x return_type = IntegerType() f_ = udf(f, return_type) self.assertTrue(f.__doc__ in f_.__doc__) self.assertEqual(f, f_.func) self.assertEqual(return_type, f_.returnType) class F(object): """Identity""" def __call__(self, x): return x f = F() return_type = IntegerType() f_ = udf(f, return_type) 
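        # udf() accepts plain functions, callable objects and functools.partial
        # objects alike; the returned wrapper keeps the original docstring and
        # exposes the wrapped callable as .func and the declared return type as
        # .returnType, which is what the assertions below verify.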
self.assertTrue(f.__doc__ in f_.__doc__) self.assertEqual(f, f_.func) self.assertEqual(return_type, f_.returnType) f = functools.partial(f, x=1) return_type = IntegerType() f_ = udf(f, return_type) self.assertTrue(f.__doc__ in f_.__doc__) self.assertEqual(f, f_.func) self.assertEqual(return_type, f_.returnType) def test_validate_column_types(self): from pyspark.sql.functions import udf, to_json from pyspark.sql.column import _to_java_column self.assertTrue("Column" in _to_java_column("a").getClass().toString()) self.assertTrue("Column" in _to_java_column(u"a").getClass().toString()) self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString()) self.assertRaisesRegexp( TypeError, "Invalid argument, not a string or column", lambda: _to_java_column(1)) class A(): pass self.assertRaises(TypeError, lambda: _to_java_column(A())) self.assertRaises(TypeError, lambda: _to_java_column([])) self.assertRaisesRegexp( TypeError, "Invalid argument, not a string or column", lambda: udf(lambda x: x)(None)) self.assertRaises(TypeError, lambda: to_json(1)) def test_basic_functions(self): rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}']) df = self.spark.read.json(rdd) df.count() df.collect() df.schema # cache and checkpoint self.assertFalse(df.is_cached) df.persist() df.unpersist(True) df.cache() self.assertTrue(df.is_cached) self.assertEqual(2, df.count()) df.createOrReplaceTempView("temp") df = self.spark.sql("select foo from temp") df.count() df.collect() def test_apply_schema_to_row(self): df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""])) df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema) self.assertEqual(df.collect(), df2.collect()) rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x)) df3 = self.spark.createDataFrame(rdd, df.schema) self.assertEqual(10, df3.count()) def test_infer_schema_to_local(self): input = [{"a": 1}, {"b": "coffee"}] rdd = self.sc.parallelize(input) df = self.spark.createDataFrame(input) df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0) self.assertEqual(df.schema, df2.schema) rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None)) df3 = self.spark.createDataFrame(rdd, df.schema) self.assertEqual(10, df3.count()) def test_apply_schema_to_dict_and_rows(self): schema = StructType().add("b", StringType()).add("a", IntegerType()) input = [{"a": 1}, {"b": "coffee"}] rdd = self.sc.parallelize(input) for verify in [False, True]: df = self.spark.createDataFrame(input, schema, verifySchema=verify) df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify) self.assertEqual(df.schema, df2.schema) rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None)) df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify) self.assertEqual(10, df3.count()) input = [Row(a=x, b=str(x)) for x in range(10)] df4 = self.spark.createDataFrame(input, schema, verifySchema=verify) self.assertEqual(10, df4.count()) def test_create_dataframe_schema_mismatch(self): input = [Row(a=1)] rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i)) schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())]) df = self.spark.createDataFrame(rdd, schema) self.assertRaises(Exception, lambda: df.show()) def test_serialize_nested_array_and_map(self): d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})] rdd = self.sc.parallelize(d) df = self.spark.createDataFrame(rdd) row = df.head() self.assertEqual(1, len(row.l)) self.assertEqual(1, row.l[0].a) self.assertEqual("2", 
row.d["key"].d) l = df.rdd.map(lambda x: x.l).first() self.assertEqual(1, len(l)) self.assertEqual('s', l[0].b) d = df.rdd.map(lambda x: x.d).first() self.assertEqual(1, len(d)) self.assertEqual(1.0, d["key"].c) row = df.rdd.map(lambda x: x.d["key"]).first() self.assertEqual(1.0, row.c) self.assertEqual("2", row.d) def test_infer_schema(self): d = [Row(l=[], d={}, s=None), Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")] rdd = self.sc.parallelize(d) df = self.spark.createDataFrame(rdd) self.assertEqual([], df.rdd.map(lambda r: r.l).first()) self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect()) df.createOrReplaceTempView("test") result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'") self.assertEqual(1, result.head()[0]) df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0) self.assertEqual(df.schema, df2.schema) self.assertEqual({}, df2.rdd.map(lambda r: r.d).first()) self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect()) df2.createOrReplaceTempView("test2") result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'") self.assertEqual(1, result.head()[0]) def test_infer_schema_not_enough_names(self): df = self.spark.createDataFrame([["a", "b"]], ["col1"]) self.assertEqual(df.columns, ['col1', '_2']) def test_infer_schema_fails(self): with self.assertRaisesRegexp(TypeError, 'field a'): self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]), schema=["a", "b"], samplingRatio=0.99) def test_infer_nested_schema(self): NestedRow = Row("f1", "f2") nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}), NestedRow([2, 3], {"row2": 2.0})]) df = self.spark.createDataFrame(nestedRdd1) self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0]) nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]), NestedRow([[2, 3], [3, 4]], [2, 3])]) df = self.spark.createDataFrame(nestedRdd2) self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0]) from collections import namedtuple CustomRow = namedtuple('CustomRow', 'field1 field2') rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"), CustomRow(field1=2, field2="row2"), CustomRow(field1=3, field2="row3")]) df = self.spark.createDataFrame(rdd) self.assertEqual(Row(field1=1, field2=u'row1'), df.first()) def test_create_dataframe_from_dict_respects_schema(self): df = self.spark.createDataFrame([{'a': 1}], ["b"]) self.assertEqual(df.columns, ['b']) def test_create_dataframe_from_objects(self): data = [MyObject(1, "1"), MyObject(2, "2")] df = self.spark.createDataFrame(data) self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")]) self.assertEqual(df.first(), Row(key=1, value="1")) def test_select_null_literal(self): df = self.spark.sql("select null as col") self.assertEqual(Row(col=None), df.first()) def test_apply_schema(self): from datetime import date, datetime rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, (2,), [1, 2, 3], None)]) schema = StructType([ StructField("byte1", ByteType(), False), StructField("byte2", ByteType(), False), StructField("short1", ShortType(), False), StructField("short2", ShortType(), False), StructField("int1", IntegerType(), False), StructField("float1", FloatType(), False), StructField("date1", DateType(), False), StructField("time1", TimestampType(), False), StructField("map1", MapType(StringType(), IntegerType(), False), False), StructField("struct1", StructType([StructField("b", 
ShortType(), False)]), False), StructField("list1", ArrayType(ByteType(), False), False), StructField("null1", DoubleType(), True)]) df = self.spark.createDataFrame(rdd, schema) results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1, x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1)) r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None) self.assertEqual(r, results.first()) df.createOrReplaceTempView("table2") r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " + "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " + "float1 + 1.5 as float1 FROM table2").first() self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r)) def test_struct_in_map(self): d = [Row(m={Row(i=1): Row(s="")})] df = self.sc.parallelize(d).toDF() k, v = list(df.head().m.items())[0] self.assertEqual(1, k.i) self.assertEqual("", v.s) def test_convert_row_to_dict(self): row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}) self.assertEqual(1, row.asDict()['l'][0].a) df = self.sc.parallelize([row]).toDF() df.createOrReplaceTempView("test") row = self.spark.sql("select l, d from test").head() self.assertEqual(1, row.asDict()["l"][0].a) self.assertEqual(1.0, row.asDict()['d']['key'].c) def test_udt(self): from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier from pyspark.sql.tests import ExamplePointUDT, ExamplePoint def check_datatype(datatype): pickled = pickle.loads(pickle.dumps(datatype)) assert datatype == pickled scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json()) python_datatype = _parse_datatype_json_string(scala_datatype.json()) assert datatype == python_datatype check_datatype(ExamplePointUDT()) structtype_with_udt = StructType([StructField("label", DoubleType(), False), StructField("point", ExamplePointUDT(), False)]) check_datatype(structtype_with_udt) p = ExamplePoint(1.0, 2.0) self.assertEqual(_infer_type(p), ExamplePointUDT()) _make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0)) self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0])) check_datatype(PythonOnlyUDT()) structtype_with_udt = StructType([StructField("label", DoubleType(), False), StructField("point", PythonOnlyUDT(), False)]) check_datatype(structtype_with_udt) p = PythonOnlyPoint(1.0, 2.0) self.assertEqual(_infer_type(p), PythonOnlyUDT()) _make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0)) self.assertRaises( ValueError, lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0])) def test_simple_udt_in_df(self): schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT()) df = self.spark.createDataFrame( [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)], schema=schema) df.show() def test_nested_udt_in_df(self): schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT())) df = self.spark.createDataFrame( [(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)], schema=schema) df.collect() schema = StructType().add("key", LongType()).add("val", MapType(LongType(), PythonOnlyUDT())) df = self.spark.createDataFrame( [(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)], schema=schema) df.collect() def test_complex_nested_udt_in_df(self): from pyspark.sql.functions import udf schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT()) df = self.spark.createDataFrame( [(i % 3, 
PythonOnlyPoint(float(i), float(i))) for i in range(10)], schema=schema) df.collect() gd = df.groupby("key").agg({"val": "collect_list"}) gd.collect() udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema)) gd.select(udf(*gd)).collect() def test_udt_with_none(self): df = self.spark.range(0, 10, 1, 1) def myudf(x): if x > 0: return PythonOnlyPoint(float(x), float(x)) self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT()) rows = [r[0] for r in df.selectExpr("udf(id)").take(2)] self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)]) def test_nonparam_udf_with_aggregate(self): import pyspark.sql.functions as f df = self.spark.createDataFrame([(1, 2), (1, 2)]) f_udf = f.udf(lambda: "const_str") rows = df.distinct().withColumn("a", f_udf()).collect() self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')]) def test_infer_schema_with_udt(self): from pyspark.sql.tests import ExamplePoint, ExamplePointUDT row = Row(label=1.0, point=ExamplePoint(1.0, 2.0)) df = self.spark.createDataFrame([row]) schema = df.schema field = [f for f in schema.fields if f.name == "point"][0] self.assertEqual(type(field.dataType), ExamplePointUDT) df.createOrReplaceTempView("labeled_point") point = self.spark.sql("SELECT point FROM labeled_point").head().point self.assertEqual(point, ExamplePoint(1.0, 2.0)) row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0)) df = self.spark.createDataFrame([row]) schema = df.schema field = [f for f in schema.fields if f.name == "point"][0] self.assertEqual(type(field.dataType), PythonOnlyUDT) df.createOrReplaceTempView("labeled_point") point = self.spark.sql("SELECT point FROM labeled_point").head().point self.assertEqual(point, PythonOnlyPoint(1.0, 2.0)) def test_apply_schema_with_udt(self): from pyspark.sql.tests import ExamplePoint, ExamplePointUDT row = (1.0, ExamplePoint(1.0, 2.0)) schema = StructType([StructField("label", DoubleType(), False), StructField("point", ExamplePointUDT(), False)]) df = self.spark.createDataFrame([row], schema) point = df.head().point self.assertEqual(point, ExamplePoint(1.0, 2.0)) row = (1.0, PythonOnlyPoint(1.0, 2.0)) schema = StructType([StructField("label", DoubleType(), False), StructField("point", PythonOnlyUDT(), False)]) df = self.spark.createDataFrame([row], schema) point = df.head().point self.assertEqual(point, PythonOnlyPoint(1.0, 2.0)) def test_udf_with_udt(self): from pyspark.sql.tests import ExamplePoint, ExamplePointUDT row = Row(label=1.0, point=ExamplePoint(1.0, 2.0)) df = self.spark.createDataFrame([row]) self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first()) udf = UserDefinedFunction(lambda p: p.y, DoubleType()) self.assertEqual(2.0, df.select(udf(df.point)).first()[0]) udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT()) self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0]) row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0)) df = self.spark.createDataFrame([row]) self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first()) udf = UserDefinedFunction(lambda p: p.y, DoubleType()) self.assertEqual(2.0, df.select(udf(df.point)).first()[0]) udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT()) self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0]) def test_parquet_with_udt(self): from pyspark.sql.tests import ExamplePoint, ExamplePointUDT row = Row(label=1.0, point=ExamplePoint(1.0, 2.0)) df0 = self.spark.createDataFrame([row]) output_dir = os.path.join(self.tempdir.name, "labeled_point") 
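        # Round-tripping through Parquet should preserve UDT values; the same
        # check is repeated below for the Python-only UDT using mode='overwrite'.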
        df0.write.parquet(output_dir)
        df1 = self.spark.read.parquet(output_dir)
        point = df1.head().point
        self.assertEqual(point, ExamplePoint(1.0, 2.0))

        row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
        df0 = self.spark.createDataFrame([row])
        df0.write.parquet(output_dir, mode='overwrite')
        df1 = self.spark.read.parquet(output_dir)
        point = df1.head().point
        self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))

    def test_union_with_udt(self):
        from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
        row1 = (1.0, ExamplePoint(1.0, 2.0))
        row2 = (2.0, ExamplePoint(3.0, 4.0))
        schema = StructType([StructField("label", DoubleType(), False),
                             StructField("point", ExamplePointUDT(), False)])
        df1 = self.spark.createDataFrame([row1], schema)
        df2 = self.spark.createDataFrame([row2], schema)

        result = df1.union(df2).orderBy("label").collect()
        self.assertEqual(
            result,
            [
                Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
                Row(label=2.0, point=ExamplePoint(3.0, 4.0))
            ]
        )

    def test_cast_to_string_with_udt(self):
        from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
        from pyspark.sql.functions import col
        row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
        schema = StructType([StructField("point", ExamplePointUDT(), False),
                             StructField("pypoint", PythonOnlyUDT(), False)])
        df = self.spark.createDataFrame([row], schema)

        result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
        self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))

    def test_column_operators(self):
        ci = self.df.key
        cs = self.df.value
        c = ci == cs
        self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
        rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
        self.assertTrue(all(isinstance(c, Column) for c in rcc))
        cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
        self.assertTrue(all(isinstance(c, Column) for c in cb))
        cbool = (ci & ci), (ci | ci), (~ci)
        self.assertTrue(all(isinstance(c, Column) for c in cbool))
        css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
            cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
        self.assertTrue(all(isinstance(c, Column) for c in css))
        self.assertTrue(isinstance(ci.cast(LongType()), Column))
        self.assertRaisesRegexp(ValueError,
                                "Cannot apply 'in' operator against a column",
                                lambda: 1 in cs)

    def test_column_getitem(self):
        from pyspark.sql.functions import col

        self.assertIsInstance(col("foo")[1:3], Column)
        self.assertIsInstance(col("foo")[0], Column)
        self.assertIsInstance(col("foo")["bar"], Column)
        self.assertRaises(ValueError, lambda: col("foo")[0:10:2])

    def test_column_select(self):
        df = self.df
        self.assertEqual(self.testData, df.select("*").collect())
        self.assertEqual(self.testData, df.select(df.key, df.value).collect())
        self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())

    def test_freqItems(self):
        vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
        df = self.sc.parallelize(vals).toDF()
        items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
        self.assertTrue(1 in items[0])
        self.assertTrue(-2.0 in items[1])

    def test_aggregator(self):
        df = self.df
        g = df.groupBy()
        self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
        self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())

        from pyspark.sql import functions
        self.assertEqual((0, u'99'),
                         tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
        self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
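        # approxCountDistinct is an approximate aggregate (HyperLogLog++ based),
        # so only a lower bound is asserted above; countDistinct below is exact.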
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0]) def test_first_last_ignorenulls(self): from pyspark.sql import functions df = self.spark.range(0, 100) df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id")) df3 = df2.select(functions.first(df2.id, False).alias('a'), functions.first(df2.id, True).alias('b'), functions.last(df2.id, False).alias('c'), functions.last(df2.id, True).alias('d')) self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect()) def test_approxQuantile(self): df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF() for f in ["a", u"a"]: aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1) self.assertTrue(isinstance(aq, list)) self.assertEqual(len(aq), 3) self.assertTrue(all(isinstance(q, float) for q in aq)) aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1) self.assertTrue(isinstance(aqs, list)) self.assertEqual(len(aqs), 2) self.assertTrue(isinstance(aqs[0], list)) self.assertEqual(len(aqs[0]), 3) self.assertTrue(all(isinstance(q, float) for q in aqs[0])) self.assertTrue(isinstance(aqs[1], list)) self.assertEqual(len(aqs[1]), 3) self.assertTrue(all(isinstance(q, float) for q in aqs[1])) aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1) self.assertTrue(isinstance(aqt, list)) self.assertEqual(len(aqt), 2) self.assertTrue(isinstance(aqt[0], list)) self.assertEqual(len(aqt[0]), 3) self.assertTrue(all(isinstance(q, float) for q in aqt[0])) self.assertTrue(isinstance(aqt[1], list)) self.assertEqual(len(aqt[1]), 3) self.assertTrue(all(isinstance(q, float) for q in aqt[1])) self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1)) self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1)) self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1)) def test_corr(self): import math df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF() corr = df.stat.corr(u"a", "b") self.assertTrue(abs(corr - 0.95734012) < 1e-6) def test_sampleby(self): df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF() sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0) self.assertTrue(sampled.count() == 3) def test_cov(self): df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF() cov = df.stat.cov(u"a", "b") self.assertTrue(abs(cov - 55.0 / 3) < 1e-6) def test_crosstab(self): df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF() ct = df.stat.crosstab(u"a", "b").collect() ct = sorted(ct, key=lambda x: x[0]) for i, row in enumerate(ct): self.assertEqual(row[0], str(i)) self.assertTrue(row[1], 1) self.assertTrue(row[2], 1) def test_math_functions(self): df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF() from pyspark.sql import functions import math def get_values(l): return [j[0] for j in l] def assert_close(a, b): c = get_values(b) diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)] return sum(diff) == len(a) assert_close([math.cos(i) for i in range(10)], df.select(functions.cos(df.a)).collect()) assert_close([math.cos(i) for i in range(10)], df.select(functions.cos("a")).collect()) assert_close([math.sin(i) for i in range(10)], df.select(functions.sin(df.a)).collect()) assert_close([math.sin(i) for i in range(10)], df.select(functions.sin(df['a'])).collect()) assert_close([math.pow(i, 2 * i) for i in range(10)], df.select(functions.pow(df.a, df.b)).collect()) assert_close([math.pow(i, 2) for i in 
range(10)], df.select(functions.pow(df.a, 2)).collect()) assert_close([math.pow(i, 2) for i in range(10)], df.select(functions.pow(df.a, 2.0)).collect()) assert_close([math.hypot(i, 2 * i) for i in range(10)], df.select(functions.hypot(df.a, df.b)).collect()) def test_rand_functions(self): df = self.df from pyspark.sql import functions rnd = df.select('key', functions.rand()).collect() for row in rnd: assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1] rndn = df.select('key', functions.randn(5)).collect() for row in rndn: assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1] # If the specified seed is 0, we should use it. # https://issues.apache.org/jira/browse/SPARK-9691 rnd1 = df.select('key', functions.rand(0)).collect() rnd2 = df.select('key', functions.rand(0)).collect() self.assertEqual(sorted(rnd1), sorted(rnd2)) rndn1 = df.select('key', functions.randn(0)).collect() rndn2 = df.select('key', functions.randn(0)).collect() self.assertEqual(sorted(rndn1), sorted(rndn2)) def test_string_functions(self): from pyspark.sql.functions import col, lit df = self.spark.createDataFrame([['nick']], schema=['name']) self.assertRaisesRegexp( TypeError, "must be the same type", lambda: df.select(col('name').substr(0, lit(1)))) if sys.version_info.major == 2: self.assertRaises( TypeError, lambda: df.select(col('name').substr(long(0), long(1)))) def test_array_contains_function(self): from pyspark.sql.functions import array_contains df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data']) actual = df.select(array_contains(df.data, 1).alias('b')).collect() # The value argument can be implicitly castable to the element's type of the array. self.assertEqual([Row(b=True), Row(b=False)], actual) def test_between_function(self): df = self.sc.parallelize([ Row(a=1, b=2, c=3), Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)]).toDF() self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)], df.filter(df.a.between(df.b, df.c)).collect()) def test_struct_type(self): struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) struct2 = StructType([StructField("f1", StringType(), True), StructField("f2", StringType(), True, None)]) self.assertEqual(struct1.fieldNames(), struct2.names) self.assertEqual(struct1, struct2) struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) struct2 = StructType([StructField("f1", StringType(), True)]) self.assertNotEqual(struct1.fieldNames(), struct2.names) self.assertNotEqual(struct1, struct2) struct1 = (StructType().add(StructField("f1", StringType(), True)) .add(StructField("f2", StringType(), True, None))) struct2 = StructType([StructField("f1", StringType(), True), StructField("f2", StringType(), True, None)]) self.assertEqual(struct1.fieldNames(), struct2.names) self.assertEqual(struct1, struct2) struct1 = (StructType().add(StructField("f1", StringType(), True)) .add(StructField("f2", StringType(), True, None))) struct2 = StructType([StructField("f1", StringType(), True)]) self.assertNotEqual(struct1.fieldNames(), struct2.names) self.assertNotEqual(struct1, struct2) # Catch exception raised during improper construction self.assertRaises(ValueError, lambda: StructType().add("name")) struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) for field in struct1: self.assertIsInstance(field, StructField) struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) self.assertEqual(len(struct1), 2) struct1 = StructType().add("f1", 
StringType(), True).add("f2", StringType(), True, None) self.assertIs(struct1["f1"], struct1.fields[0]) self.assertIs(struct1[0], struct1.fields[0]) self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1])) self.assertRaises(KeyError, lambda: struct1["f9"]) self.assertRaises(IndexError, lambda: struct1[9]) self.assertRaises(TypeError, lambda: struct1[9.9]) def test_parse_datatype_string(self): from pyspark.sql.types import _all_atomic_types, _parse_datatype_string for k, t in _all_atomic_types.items(): if t != NullType: self.assertEqual(t(), _parse_datatype_string(k)) self.assertEqual(IntegerType(), _parse_datatype_string("int")) self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)")) self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )")) self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)")) self.assertEqual( ArrayType(IntegerType()), _parse_datatype_string("array<int >")) self.assertEqual( MapType(IntegerType(), DoubleType()), _parse_datatype_string("map< int, double >")) self.assertEqual( StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]), _parse_datatype_string("struct<a:int, c:double >")) self.assertEqual( StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]), _parse_datatype_string("a:int, c:double")) self.assertEqual( StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]), _parse_datatype_string("a INT, c DOUBLE")) def test_metadata_null(self): schema = StructType([StructField("f1", StringType(), True, None), StructField("f2", StringType(), True, {'a': None})]) rdd = self.sc.parallelize([["a", "b"], ["c", "d"]]) self.spark.createDataFrame(rdd, schema) def test_save_and_load(self): df = self.df tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) df.write.json(tmpPath) actual = self.spark.read.json(tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) schema = StructType([StructField("value", StringType(), True)]) actual = self.spark.read.json(tmpPath, schema) self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect())) df.write.json(tmpPath, "overwrite") actual = self.spark.read.json(tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) df.write.save(format="json", mode="overwrite", path=tmpPath, noUse="this options will not be used in save.") actual = self.spark.read.load(format="json", path=tmpPath, noUse="this options will not be used in load.") self.assertEqual(sorted(df.collect()), sorted(actual.collect())) defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default", "org.apache.spark.sql.parquet") self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json") actual = self.spark.read.load(path=tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName) csvpath = os.path.join(tempfile.mkdtemp(), 'data') df.write.option('quote', None).format('csv').save(csvpath) shutil.rmtree(tmpPath) def test_save_and_load_builder(self): df = self.df tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) df.write.json(tmpPath) actual = self.spark.read.json(tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) schema = StructType([StructField("value", StringType(), True)]) actual = self.spark.read.json(tmpPath, schema) self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect())) df.write.mode("overwrite").json(tmpPath) actual = 
self.spark.read.json(tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) df.write.mode("overwrite").options(noUse="this options will not be used in save.")\ .option("noUse", "this option will not be used in save.")\ .format("json").save(path=tmpPath) actual =\ self.spark.read.format("json")\ .load(path=tmpPath, noUse="this options will not be used in load.") self.assertEqual(sorted(df.collect()), sorted(actual.collect())) defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default", "org.apache.spark.sql.parquet") self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json") actual = self.spark.read.load(path=tmpPath) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName) shutil.rmtree(tmpPath) def test_stream_trigger(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') # Should take at least one arg try: df.writeStream.trigger() except ValueError: pass # Should not take multiple args try: df.writeStream.trigger(once=True, processingTime='5 seconds') except ValueError: pass # Should not take multiple args try: df.writeStream.trigger(processingTime='5 seconds', continuous='1 second') except ValueError: pass # Should take only keyword args try: df.writeStream.trigger('5 seconds') self.fail("Should have thrown an exception") except TypeError: pass def test_stream_read_options(self): schema = StructType([StructField("data", StringType(), False)]) df = self.spark.readStream\ .format('text')\ .option('path', 'python/test_support/sql/streaming')\ .schema(schema)\ .load() self.assertTrue(df.isStreaming) self.assertEqual(df.schema.simpleString(), "struct<data:string>") def test_stream_read_options_overwrite(self): bad_schema = StructType([StructField("test", IntegerType(), False)]) schema = StructType([StructField("data", StringType(), False)]) df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \ .schema(bad_schema)\ .load(path='python/test_support/sql/streaming', schema=schema, format='text') self.assertTrue(df.isStreaming) self.assertEqual(df.schema.simpleString(), "struct<data:string>") def test_stream_save_options(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \ .withColumn('id', lit(1)) for q in self.spark._wrapped.streams.active: q.stop() tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) self.assertTrue(df.isStreaming) out = os.path.join(tmpPath, 'out') chk = os.path.join(tmpPath, 'chk') q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \ .format('parquet').partitionBy('id').outputMode('append').option('path', out).start() try: self.assertEqual(q.name, 'this_query') self.assertTrue(q.isActive) q.processAllAvailable() output_files = [] for _, _, files in os.walk(out): output_files.extend([f for f in files if not f.startswith('.')]) self.assertTrue(len(output_files) > 0) self.assertTrue(len(os.listdir(chk)) > 0) finally: q.stop() shutil.rmtree(tmpPath) def test_stream_save_options_overwrite(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') for q in self.spark._wrapped.streams.active: q.stop() tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) self.assertTrue(df.isStreaming) out = os.path.join(tmpPath, 'out') chk = os.path.join(tmpPath, 'chk') fake1 = os.path.join(tmpPath, 'fake1') fake2 = os.path.join(tmpPath, 'fake2') q = df.writeStream.option('checkpointLocation', fake1)\ 
.format('memory').option('path', fake2) \ .queryName('fake_query').outputMode('append') \ .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) try: self.assertEqual(q.name, 'this_query') self.assertTrue(q.isActive) q.processAllAvailable() output_files = [] for _, _, files in os.walk(out): output_files.extend([f for f in files if not f.startswith('.')]) self.assertTrue(len(output_files) > 0) self.assertTrue(len(os.listdir(chk)) > 0) self.assertFalse(os.path.isdir(fake1)) # should not have been created self.assertFalse(os.path.isdir(fake2)) # should not have been created finally: q.stop() shutil.rmtree(tmpPath) def test_stream_status_and_progress(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') for q in self.spark._wrapped.streams.active: q.stop() tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) self.assertTrue(df.isStreaming) out = os.path.join(tmpPath, 'out') chk = os.path.join(tmpPath, 'chk') def func(x): time.sleep(1) return x from pyspark.sql.functions import col, udf sleep_udf = udf(func) # Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there # were no updates. q = df.select(sleep_udf(col("value")).alias('value')).writeStream \ .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) try: # "lastProgress" will return None in most cases. However, as it may be flaky when # Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress" # may throw error with a high chance and make this test flaky, so we should still be # able to detect broken codes. q.lastProgress q.processAllAvailable() lastProgress = q.lastProgress recentProgress = q.recentProgress status = q.status self.assertEqual(lastProgress['name'], q.name) self.assertEqual(lastProgress['id'], q.id) self.assertTrue(any(p == lastProgress for p in recentProgress)) self.assertTrue( "message" in status and "isDataAvailable" in status and "isTriggerActive" in status) finally: q.stop() shutil.rmtree(tmpPath) def test_stream_await_termination(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') for q in self.spark._wrapped.streams.active: q.stop() tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) self.assertTrue(df.isStreaming) out = os.path.join(tmpPath, 'out') chk = os.path.join(tmpPath, 'chk') q = df.writeStream\ .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) try: self.assertTrue(q.isActive) try: q.awaitTermination("hello") self.fail("Expected a value exception") except ValueError: pass now = time.time() # test should take at least 2 seconds res = q.awaitTermination(2.6) duration = time.time() - now self.assertTrue(duration >= 2) self.assertFalse(res) finally: q.stop() shutil.rmtree(tmpPath) def test_stream_exception(self): sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming') sq = sdf.writeStream.format('memory').queryName('query_explain').start() try: sq.processAllAvailable() self.assertEqual(sq.exception(), None) finally: sq.stop() from pyspark.sql.functions import col, udf from pyspark.sql.utils import StreamingQueryException bad_udf = udf(lambda x: 1 / 0) sq = sdf.select(bad_udf(col("value")))\ .writeStream\ .format('memory')\ .queryName('this_query')\ .start() try: # Process some data to fail the query sq.processAllAvailable() self.fail("bad udf should fail the query") except StreamingQueryException as e: # This is expected self.assertTrue("ZeroDivisionError" in 
e.desc) finally: sq.stop() self.assertTrue(type(sq.exception()) is StreamingQueryException) self.assertTrue("ZeroDivisionError" in sq.exception().desc) def test_query_manager_await_termination(self): df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') for q in self.spark._wrapped.streams.active: q.stop() tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) self.assertTrue(df.isStreaming) out = os.path.join(tmpPath, 'out') chk = os.path.join(tmpPath, 'chk') q = df.writeStream\ .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk) try: self.assertTrue(q.isActive) try: self.spark._wrapped.streams.awaitAnyTermination("hello") self.fail("Expected a value exception") except ValueError: pass now = time.time() # test should take at least 2 seconds res = self.spark._wrapped.streams.awaitAnyTermination(2.6) duration = time.time() - now self.assertTrue(duration >= 2) self.assertFalse(res) finally: q.stop() shutil.rmtree(tmpPath) def test_help_command(self): # Regression test for SPARK-5464 rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}']) df = self.spark.read.json(rdd) # render_doc() reproduces the help() exception without printing output pydoc.render_doc(df) pydoc.render_doc(df.foo) pydoc.render_doc(df.take(1)) def test_access_column(self): df = self.df self.assertTrue(isinstance(df.key, Column)) self.assertTrue(isinstance(df['key'], Column)) self.assertTrue(isinstance(df[0], Column)) self.assertRaises(IndexError, lambda: df[2]) self.assertRaises(AnalysisException, lambda: df["bad_key"]) self.assertRaises(TypeError, lambda: df[{}]) def test_column_name_with_non_ascii(self): if sys.version >= '3': columnName = "数量" self.assertTrue(isinstance(columnName, str)) else: columnName = unicode("数量", "utf-8") self.assertTrue(isinstance(columnName, unicode)) schema = StructType([StructField(columnName, LongType(), True)]) df = self.spark.createDataFrame([(1,)], schema) self.assertEqual(schema, df.schema) self.assertEqual("DataFrame[数量: bigint]", str(df)) self.assertEqual([("数量", 'bigint')], df.dtypes) self.assertEqual(1, df.select("数量").first()[0]) self.assertEqual(1, df.select(df["数量"]).first()[0]) def test_access_nested_types(self): df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF() self.assertEqual(1, df.select(df.l[0]).first()[0]) self.assertEqual(1, df.select(df.l.getItem(0)).first()[0]) self.assertEqual(1, df.select(df.r.a).first()[0]) self.assertEqual("b", df.select(df.r.getField("b")).first()[0]) self.assertEqual("v", df.select(df.d["k"]).first()[0]) self.assertEqual("v", df.select(df.d.getItem("k")).first()[0]) def test_field_accessor(self): df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF() self.assertEqual(1, df.select(df.l[0]).first()[0]) self.assertEqual(1, df.select(df.r["a"]).first()[0]) self.assertEqual(1, df.select(df["r.a"]).first()[0]) self.assertEqual("b", df.select(df.r["b"]).first()[0]) self.assertEqual("b", df.select(df["r.b"]).first()[0]) self.assertEqual("v", df.select(df.d["k"]).first()[0]) def test_infer_long_type(self): longrow = [Row(f1='a', f2=100000000000000)] df = self.sc.parallelize(longrow).toDF() self.assertEqual(df.schema.fields[1].dataType, LongType()) # this saving as Parquet caused issues as well. 
output_dir = os.path.join(self.tempdir.name, "infer_long_type") df.write.parquet(output_dir) df1 = self.spark.read.parquet(output_dir) self.assertEqual('a', df1.first().f1) self.assertEqual(100000000000000, df1.first().f2) self.assertEqual(_infer_type(1), LongType()) self.assertEqual(_infer_type(2**10), LongType()) self.assertEqual(_infer_type(2**20), LongType()) self.assertEqual(_infer_type(2**31 - 1), LongType()) self.assertEqual(_infer_type(2**31), LongType()) self.assertEqual(_infer_type(2**61), LongType()) self.assertEqual(_infer_type(2**71), LongType()) def test_merge_type(self): self.assertEqual(_merge_type(LongType(), NullType()), LongType()) self.assertEqual(_merge_type(NullType(), LongType()), LongType()) self.assertEqual(_merge_type(LongType(), LongType()), LongType()) self.assertEqual(_merge_type( ArrayType(LongType()), ArrayType(LongType()) ), ArrayType(LongType())) with self.assertRaisesRegexp(TypeError, 'element in array'): _merge_type(ArrayType(LongType()), ArrayType(DoubleType())) self.assertEqual(_merge_type( MapType(StringType(), LongType()), MapType(StringType(), LongType()) ), MapType(StringType(), LongType())) with self.assertRaisesRegexp(TypeError, 'key of map'): _merge_type( MapType(StringType(), LongType()), MapType(DoubleType(), LongType())) with self.assertRaisesRegexp(TypeError, 'value of map'): _merge_type( MapType(StringType(), LongType()), MapType(StringType(), DoubleType())) self.assertEqual(_merge_type( StructType([StructField("f1", LongType()), StructField("f2", StringType())]), StructType([StructField("f1", LongType()), StructField("f2", StringType())]) ), StructType([StructField("f1", LongType()), StructField("f2", StringType())])) with self.assertRaisesRegexp(TypeError, 'field f1'): _merge_type( StructType([StructField("f1", LongType()), StructField("f2", StringType())]), StructType([StructField("f1", DoubleType()), StructField("f2", StringType())])) self.assertEqual(_merge_type( StructType([StructField("f1", StructType([StructField("f2", LongType())]))]), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]) ), StructType([StructField("f1", StructType([StructField("f2", LongType())]))])) with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'): _merge_type( StructType([StructField("f1", StructType([StructField("f2", LongType())]))]), StructType([StructField("f1", StructType([StructField("f2", StringType())]))])) self.assertEqual(_merge_type( StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]) ), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])) with self.assertRaisesRegexp(TypeError, 'element in array field f1'): _merge_type( StructType([ StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]), StructType([ StructField("f1", ArrayType(DoubleType())), StructField("f2", StringType())])) self.assertEqual(_merge_type( StructType([ StructField("f1", MapType(StringType(), LongType())), StructField("f2", StringType())]), StructType([ StructField("f1", MapType(StringType(), LongType())), StructField("f2", StringType())]) ), StructType([ StructField("f1", MapType(StringType(), LongType())), StructField("f2", StringType())])) with self.assertRaisesRegexp(TypeError, 'value of map field f1'): _merge_type( StructType([ StructField("f1", MapType(StringType(), LongType())), StructField("f2", StringType())]), StructType([ StructField("f1", 
MapType(StringType(), DoubleType())), StructField("f2", StringType())])) self.assertEqual(_merge_type( StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]) ), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])) with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'): _merge_type( StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]), StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))]) ) def test_filter_with_datetime(self): time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000) date = time.date() row = Row(date=date, time=time) df = self.spark.createDataFrame([row]) self.assertEqual(1, df.filter(df.date == date).count()) self.assertEqual(1, df.filter(df.time == time).count()) self.assertEqual(0, df.filter(df.date > date).count()) self.assertEqual(0, df.filter(df.time > time).count()) def test_filter_with_datetime_timezone(self): dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0)) dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1)) row = Row(date=dt1) df = self.spark.createDataFrame([row]) self.assertEqual(0, df.filter(df.date == dt2).count()) self.assertEqual(1, df.filter(df.date > dt2).count()) self.assertEqual(0, df.filter(df.date < dt2).count()) def test_time_with_timezone(self): day = datetime.date.today() now = datetime.datetime.now() ts = time.mktime(now.timetuple()) # class in __main__ is not serializable from pyspark.sql.tests import UTCOffsetTimezone utc = UTCOffsetTimezone() utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds # add microseconds to utcnow (keeping year,month,day,hour,minute,second) utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc))) df = self.spark.createDataFrame([(day, now, utcnow)]) day1, now1, utcnow1 = df.first() self.assertEqual(day1, day) self.assertEqual(now, now1) self.assertEqual(now, utcnow1) # regression test for SPARK-19561 def test_datetime_at_epoch(self): epoch = datetime.datetime.fromtimestamp(0) df = self.spark.createDataFrame([Row(date=epoch)]) first = df.select('date', lit(epoch).alias('lit_date')).first() self.assertEqual(first['date'], epoch) self.assertEqual(first['lit_date'], epoch) def test_dayofweek(self): from pyspark.sql.functions import dayofweek dt = datetime.datetime(2017, 11, 6) df = self.spark.createDataFrame([Row(date=dt)]) row = df.select(dayofweek(df.date)).first() self.assertEqual(row[0], 2) def test_decimal(self): from decimal import Decimal schema = StructType([StructField("decimal", DecimalType(10, 5))]) df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema) row = df.select(df.decimal + 1).first() self.assertEqual(row[0], Decimal("4.14159")) tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) df.write.parquet(tmpPath) df2 = self.spark.read.parquet(tmpPath) row = df2.first() self.assertEqual(row[0], Decimal("3.14159")) def test_dropna(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) # shouldn't drop a non-null row self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, 80.1)], schema).dropna().count(), 1) # dropping rows with a single null value self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna().count(), 0) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 
80.1)], schema).dropna(how='any').count(), 0) # if how = 'all', only drop rows if all values are null self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna(how='all').count(), 1) self.assertEqual(self.spark.createDataFrame( [(None, None, None)], schema).dropna(how='all').count(), 0) # how and subset self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(), 0) # threshold self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, None)], schema).dropna(thresh=2).count(), 0) # threshold and subset self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(), 0) # thresh should take precedence over how self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], schema).dropna( how='any', thresh=2, subset=['name', 'age']).count(), 1) def test_fillna(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True), StructField("spy", BooleanType(), True)]) # fillna shouldn't change non-null values row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first() self.assertEqual(row.age, 10) # fillna with int row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first() self.assertEqual(row.age, 50) self.assertEqual(row.height, 50.0) # fillna with double row = self.spark.createDataFrame( [(u'Alice', None, None, None)], schema).fillna(50.1).first() self.assertEqual(row.age, 50) self.assertEqual(row.height, 50.1) # fillna with bool row = self.spark.createDataFrame( [(u'Alice', None, None, None)], schema).fillna(True).first() self.assertEqual(row.age, None) self.assertEqual(row.spy, True) # fillna with string row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first() self.assertEqual(row.name, u"hello") self.assertEqual(row.age, None) # fillna with subset specified for numeric cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first() self.assertEqual(row.name, None) self.assertEqual(row.age, 50) self.assertEqual(row.height, None) self.assertEqual(row.spy, None) # fillna with subset specified for string cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first() self.assertEqual(row.name, "haha") self.assertEqual(row.age, None) self.assertEqual(row.height, None) self.assertEqual(row.spy, None) # fillna with subset specified for bool cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first() self.assertEqual(row.name, None) self.assertEqual(row.age, None) self.assertEqual(row.height, None) self.assertEqual(row.spy, True) # fillna with dictionary for boolean types row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first() self.assertEqual(row.a, True) def test_bitwise_operations(self): from pyspark.sql import functions row = Row(a=170, b=75) df = 
self.spark.createDataFrame([row]) result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict() self.assertEqual(170 & 75, result['(a & b)']) result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict() self.assertEqual(170 | 75, result['(a | b)']) result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict() self.assertEqual(170 ^ 75, result['(a ^ b)']) result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict() self.assertEqual(~75, result['~b']) def test_expr(self): from pyspark.sql import functions row = Row(a="length string", b=75) df = self.spark.createDataFrame([row]) result = df.select(functions.expr("length(a)")).collect()[0].asDict() self.assertEqual(13, result["length(a)"]) def test_repartitionByRange_dataframe(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) df1 = self.spark.createDataFrame( [(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema) df2 = self.spark.createDataFrame( [(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema) # test repartitionByRange(numPartitions, *cols) df3 = df1.repartitionByRange(2, "name", "age") self.assertEqual(df3.rdd.getNumPartitions(), 2) self.assertEqual(df3.rdd.first(), df2.rdd.first()) self.assertEqual(df3.rdd.take(3), df2.rdd.take(3)) # test repartitionByRange(numPartitions, *cols) df4 = df1.repartitionByRange(3, "name", "age") self.assertEqual(df4.rdd.getNumPartitions(), 3) self.assertEqual(df4.rdd.first(), df2.rdd.first()) self.assertEqual(df4.rdd.take(3), df2.rdd.take(3)) # test repartitionByRange(*cols) df5 = df1.repartitionByRange("name", "age") self.assertEqual(df5.rdd.first(), df2.rdd.first()) self.assertEqual(df5.rdd.take(3), df2.rdd.take(3)) def test_replace(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) # replace with int row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first() self.assertEqual(row.age, 20) self.assertEqual(row.height, 20.0) # replace with double row = self.spark.createDataFrame( [(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first() self.assertEqual(row.age, 82) self.assertEqual(row.height, 82.1) # replace with string row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first() self.assertEqual(row.name, u"Ann") self.assertEqual(row.age, 10) # replace with subset specified by a string of a column name w/ actual change row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first() self.assertEqual(row.age, 20) # replace with subset specified by a string of a column name w/o actual change row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first() self.assertEqual(row.age, 10) # replace with subset specified with one column replaced, another column not in subset # stays unchanged. 
row = self.spark.createDataFrame( [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first() self.assertEqual(row.name, u'Alice') self.assertEqual(row.age, 20) self.assertEqual(row.height, 10.0) # replace with subset specified but no column will be replaced row = self.spark.createDataFrame( [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first() self.assertEqual(row.name, u'Alice') self.assertEqual(row.age, 10) self.assertEqual(row.height, None) # replace with lists row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first() self.assertTupleEqual(row, (u'Ann', 10, 80.1)) # replace with dict row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first() self.assertTupleEqual(row, (u'Alice', 11, 80.1)) # test backward compatibility with dummy value dummy_value = 1 row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first() self.assertTupleEqual(row, (u'Bob', 10, 80.1)) # test dict with mixed numerics row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first() self.assertTupleEqual(row, (u'Alice', -10, 90.5)) # replace with tuples row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first() self.assertTupleEqual(row, (u'Bob', 10, 80.1)) # replace multiple columns row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first() self.assertTupleEqual(row, (u'Alice', 20, 90.0)) # test for mixed numerics row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first() self.assertTupleEqual(row, (u'Alice', 20, 90.5)) row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first() self.assertTupleEqual(row, (u'Alice', 20, 90.5)) # replace with boolean row = (self .spark.createDataFrame([(u'Alice', 10, 80.0)], schema) .selectExpr("name = 'Bob'", 'age <= 15') .replace(False, True).first()) self.assertTupleEqual(row, (True, True)) # replace string with None and then drop None rows row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna() self.assertEqual(row.count(), 0) # replace with number and None row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first() self.assertTupleEqual(row, (u'Alice', 20, None)) # should fail if subset is not list, tuple or None with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first() # should fail if to_replace and value have different length with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first() # should fail if when received unexpected type with self.assertRaises(ValueError): from datetime import datetime self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first() # should fail if provided mixed type replacements with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first() with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first() with self.assertRaisesRegexp( TypeError, 'value argument is required when to_replace is not a dictionary.'): 
self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first() def test_capture_analysis_exception(self): self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc")) self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b")) def test_capture_parse_exception(self): self.assertRaises(ParseException, lambda: self.spark.sql("abc")) def test_capture_illegalargument_exception(self): self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks", lambda: self.spark.sql("SET mapred.reduce.tasks=-1")) df = self.spark.createDataFrame([(1, 2)], ["a", "b"]) self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values", lambda: df.select(sha2(df.a, 1024)).collect()) try: df.select(sha2(df.a, 1024)).collect() except IllegalArgumentException as e: self.assertRegexpMatches(e.desc, "1024 is not in the permitted values") self.assertRegexpMatches(e.stackTrace, "org.apache.spark.sql.functions") def test_with_column_with_existing_name(self): keys = self.df.withColumn("key", self.df.key).select("key").collect() self.assertEqual([r.key for r in keys], list(range(100))) # regression test for SPARK-10417 def test_column_iterator(self): def foo(): for x in self.df.key: break self.assertRaises(TypeError, foo) # add test for SPARK-10577 (test broadcast join hint) def test_functions_broadcast(self): from pyspark.sql.functions import broadcast df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value")) df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value")) # equijoin - should be converted into broadcast join plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan() self.assertEqual(1, plan1.toString().count("BroadcastHashJoin")) # no join key -- should not be a broadcast join plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan() self.assertEqual(0, plan2.toString().count("BroadcastHashJoin")) # planner should not crash without a join broadcast(df1)._jdf.queryExecution().executedPlan() def test_generic_hints(self): from pyspark.sql import DataFrame df1 = self.spark.range(10e10).toDF("id") df2 = self.spark.range(10e10).toDF("id") self.assertIsInstance(df1.hint("broadcast"), DataFrame) self.assertIsInstance(df1.hint("broadcast", []), DataFrame) # Dummy rules self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame) self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame) plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan() self.assertEqual(1, plan.toString().count("BroadcastHashJoin")) def test_sample(self): self.assertRaisesRegexp( TypeError, "should be a bool, float and number", lambda: self.spark.range(1).sample()) self.assertRaises( TypeError, lambda: self.spark.range(1).sample("a")) self.assertRaises( TypeError, lambda: self.spark.range(1).sample(seed="abc")) self.assertRaises( IllegalArgumentException, lambda: self.spark.range(1).sample(-1.0)) def test_toDF_with_schema_string(self): data = [Row(key=i, value=str(i)) for i in range(100)] rdd = self.sc.parallelize(data, 5) df = rdd.toDF("key: int, value: string") self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>") self.assertEqual(df.collect(), data) # different but compatible field types can be used. 
df = rdd.toDF("key: string, value: string") self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>") self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)]) # field names can differ. df = rdd.toDF(" a: int, b: string ") self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>") self.assertEqual(df.collect(), data) # number of fields must match. self.assertRaisesRegexp(Exception, "Length of object", lambda: rdd.toDF("key: int").collect()) # field types mismatch will cause exception at runtime. self.assertRaisesRegexp(Exception, "FloatType can not accept", lambda: rdd.toDF("key: float, value: string").collect()) # flat schema values will be wrapped into row. df = rdd.map(lambda row: row.key).toDF("int") self.assertEqual(df.schema.simpleString(), "struct<value:int>") self.assertEqual(df.collect(), [Row(key=i) for i in range(100)]) # users can use DataType directly instead of data type string. df = rdd.map(lambda row: row.key).toDF(IntegerType()) self.assertEqual(df.schema.simpleString(), "struct<value:int>") self.assertEqual(df.collect(), [Row(key=i) for i in range(100)]) def test_join_without_on(self): df1 = self.spark.range(1).toDF("a") df2 = self.spark.range(1).toDF("b") with self.sql_conf({"spark.sql.crossJoin.enabled": False}): self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect()) with self.sql_conf({"spark.sql.crossJoin.enabled": True}): actual = df1.join(df2, how="inner").collect() expected = [Row(a=0, b=0)] self.assertEqual(actual, expected) # Regression test for invalid join methods when on is None, Spark-14761 def test_invalid_join_method(self): df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"]) df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"]) self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type")) # Cartesian products require cross join syntax def test_require_cross(self): from pyspark.sql.functions import broadcast df1 = self.spark.createDataFrame([(1, "1")], ("key", "value")) df2 = self.spark.createDataFrame([(1, "1")], ("key", "value")) # joins without conditions require cross join syntax self.assertRaises(AnalysisException, lambda: df1.join(df2).collect()) # works with crossJoin self.assertEqual(1, df1.crossJoin(df2).count()) def test_conf(self): spark = self.spark spark.conf.set("bogo", "sipeo") self.assertEqual(spark.conf.get("bogo"), "sipeo") spark.conf.set("bogo", "ta") self.assertEqual(spark.conf.get("bogo"), "ta") self.assertEqual(spark.conf.get("bogo", "not.read"), "ta") self.assertEqual(spark.conf.get("not.set", "ta"), "ta") self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set")) spark.conf.unset("bogo") self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia") self.assertEqual(spark.conf.get("hyukjin", None), None) # This returns 'STATIC' because it's the default value of # 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in # `spark.conf.get` is unset. self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC") # This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but # `defaultValue` in `spark.conf.get` is set to None. 
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None) def test_current_database(self): spark = self.spark spark.catalog._reset() self.assertEquals(spark.catalog.currentDatabase(), "default") spark.sql("CREATE DATABASE some_db") spark.catalog.setCurrentDatabase("some_db") self.assertEquals(spark.catalog.currentDatabase(), "some_db") self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.setCurrentDatabase("does_not_exist")) def test_list_databases(self): spark = self.spark spark.catalog._reset() databases = [db.name for db in spark.catalog.listDatabases()] self.assertEquals(databases, ["default"]) spark.sql("CREATE DATABASE some_db") databases = [db.name for db in spark.catalog.listDatabases()] self.assertEquals(sorted(databases), ["default", "some_db"]) def test_list_tables(self): from pyspark.sql.catalog import Table spark = self.spark spark.catalog._reset() spark.sql("CREATE DATABASE some_db") self.assertEquals(spark.catalog.listTables(), []) self.assertEquals(spark.catalog.listTables("some_db"), []) spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab") spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet") spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet") tables = sorted(spark.catalog.listTables(), key=lambda t: t.name) tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name) tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name) self.assertEquals(tables, tablesDefault) self.assertEquals(len(tables), 2) self.assertEquals(len(tablesSomeDb), 2) self.assertEquals(tables[0], Table( name="tab1", database="default", description=None, tableType="MANAGED", isTemporary=False)) self.assertEquals(tables[1], Table( name="temp_tab", database=None, description=None, tableType="TEMPORARY", isTemporary=True)) self.assertEquals(tablesSomeDb[0], Table( name="tab2", database="some_db", description=None, tableType="MANAGED", isTemporary=False)) self.assertEquals(tablesSomeDb[1], Table( name="temp_tab", database=None, description=None, tableType="TEMPORARY", isTemporary=True)) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.listTables("does_not_exist")) def test_list_functions(self): from pyspark.sql.catalog import Function spark = self.spark spark.catalog._reset() spark.sql("CREATE DATABASE some_db") functions = dict((f.name, f) for f in spark.catalog.listFunctions()) functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default")) self.assertTrue(len(functions) > 200) self.assertTrue("+" in functions) self.assertTrue("like" in functions) self.assertTrue("month" in functions) self.assertTrue("to_date" in functions) self.assertTrue("to_timestamp" in functions) self.assertTrue("to_unix_timestamp" in functions) self.assertTrue("current_database" in functions) self.assertEquals(functions["+"], Function( name="+", description=None, className="org.apache.spark.sql.catalyst.expressions.Add", isTemporary=True)) self.assertEquals(functions, functionsDefault) spark.catalog.registerFunction("temp_func", lambda x: str(x)) spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'") spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'") newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions()) newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db")) self.assertTrue(set(functions).issubset(set(newFunctions))) 
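        # Editor's aside (illustrative, not part of the original test suite): each entry
        # returned by listFunctions() is a pyspark.sql.catalog.Function carrying
        # name/description/className/isTemporary fields, so individual built-ins can be
        # inspected directly from the dicts built above, e.g.:
        add_fn = functions["+"]
        self.assertEqual(add_fn.className, "org.apache.spark.sql.catalyst.expressions.Add")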
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb))) self.assertTrue("temp_func" in newFunctions) self.assertTrue("func1" in newFunctions) self.assertTrue("func2" not in newFunctions) self.assertTrue("temp_func" in newFunctionsSomeDb) self.assertTrue("func1" not in newFunctionsSomeDb) self.assertTrue("func2" in newFunctionsSomeDb) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.listFunctions("does_not_exist")) def test_list_columns(self): from pyspark.sql.catalog import Column spark = self.spark spark.catalog._reset() spark.sql("CREATE DATABASE some_db") spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet") spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet") columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name) columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name) self.assertEquals(columns, columnsDefault) self.assertEquals(len(columns), 2) self.assertEquals(columns[0], Column( name="age", description=None, dataType="int", nullable=True, isPartition=False, isBucket=False)) self.assertEquals(columns[1], Column( name="name", description=None, dataType="string", nullable=True, isPartition=False, isBucket=False)) columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name) self.assertEquals(len(columns2), 2) self.assertEquals(columns2[0], Column( name="nickname", description=None, dataType="string", nullable=True, isPartition=False, isBucket=False)) self.assertEquals(columns2[1], Column( name="tolerance", description=None, dataType="float", nullable=True, isPartition=False, isBucket=False)) self.assertRaisesRegexp( AnalysisException, "tab2", lambda: spark.catalog.listColumns("tab2")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.listColumns("does_not_exist")) def test_cache(self): spark = self.spark spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1") spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2") self.assertFalse(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) spark.catalog.cacheTable("tab1") self.assertTrue(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) spark.catalog.cacheTable("tab2") spark.catalog.uncacheTable("tab1") self.assertFalse(spark.catalog.isCached("tab1")) self.assertTrue(spark.catalog.isCached("tab2")) spark.catalog.clearCache() self.assertFalse(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.isCached("does_not_exist")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.cacheTable("does_not_exist")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.uncacheTable("does_not_exist")) def test_read_text_file_list(self): df = self.spark.read.text(['python/test_support/sql/text-test.txt', 'python/test_support/sql/text-test.txt']) count = df.count() self.assertEquals(count, 4) def test_BinaryType_serialization(self): # Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808 # The empty bytearray is test for SPARK-21534. 
schema = StructType([StructField('mybytes', BinaryType())]) data = [[bytearray(b'here is my data')], [bytearray(b'and here is some more')], [bytearray(b'')]] df = self.spark.createDataFrame(data, schema=schema) df.collect() # test for SPARK-16542 def test_array_types(self): # This test need to make sure that the Scala type selected is at least # as large as the python's types. This is necessary because python's # array types depend on C implementation on the machine. Therefore there # is no machine independent correspondence between python's array types # and Scala types. # See: https://docs.python.org/2/library/array.html def assertCollectSuccess(typecode, value): row = Row(myarray=array.array(typecode, [value])) df = self.spark.createDataFrame([row]) self.assertEqual(df.first()["myarray"][0], value) # supported string types # # String types in python's array are "u" for Py_UNICODE and "c" for char. # "u" will be removed in python 4, and "c" is not supported in python 3. supported_string_types = [] if sys.version_info[0] < 4: supported_string_types += ['u'] # test unicode assertCollectSuccess('u', u'a') if sys.version_info[0] < 3: supported_string_types += ['c'] # test string assertCollectSuccess('c', 'a') # supported float and double # # Test max, min, and precision for float and double, assuming IEEE 754 # floating-point format. supported_fractional_types = ['f', 'd'] assertCollectSuccess('f', ctypes.c_float(1e+38).value) assertCollectSuccess('f', ctypes.c_float(1e-38).value) assertCollectSuccess('f', ctypes.c_float(1.123456).value) assertCollectSuccess('d', sys.float_info.max) assertCollectSuccess('d', sys.float_info.min) assertCollectSuccess('d', sys.float_info.epsilon) # supported signed int types # # The size of C types changes with implementation, we need to make sure # that there is no overflow error on the platform running this test. supported_signed_int_types = list( set(_array_signed_int_typecode_ctype_mappings.keys()) .intersection(set(_array_type_mappings.keys()))) for t in supported_signed_int_types: ctype = _array_signed_int_typecode_ctype_mappings[t] max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1) assertCollectSuccess(t, max_val - 1) assertCollectSuccess(t, -max_val) # supported unsigned int types # # JVM does not have unsigned types. We need to be very careful to make # sure that there is no overflow error. supported_unsigned_int_types = list( set(_array_unsigned_int_typecode_ctype_mappings.keys()) .intersection(set(_array_type_mappings.keys()))) for t in supported_unsigned_int_types: ctype = _array_unsigned_int_typecode_ctype_mappings[t] assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1) # all supported types # # Make sure the types tested above: # 1. are all supported types # 2. cover all supported types supported_types = (supported_string_types + supported_fractional_types + supported_signed_int_types + supported_unsigned_int_types) self.assertEqual(set(supported_types), set(_array_type_mappings.keys())) # all unsupported types # # Keys in _array_type_mappings is a complete list of all supported types, # and types not in _array_type_mappings are considered unsupported. # `array.typecodes` are not supported in python 2. 
if sys.version_info[0] < 3: all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd']) else: all_types = set(array.typecodes) unsupported_types = all_types - set(supported_types) # test unsupported types for t in unsupported_types: with self.assertRaises(TypeError): a = array.array(t) self.spark.createDataFrame([Row(myarray=a)]).collect() def test_bucketed_write(self): data = [ (1, "foo", 3.0), (2, "foo", 5.0), (3, "bar", -1.0), (4, "bar", 6.0), ] df = self.spark.createDataFrame(data, ["x", "y", "z"]) def count_bucketed_cols(names, table="pyspark_bucket"): """Given a sequence of column names and a table name query the catalog and return number o columns which are used for bucketing """ cols = self.spark.catalog.listColumns(table) num = len([c for c in cols if c.name in names and c.isBucket]) return num # Test write with one bucketing column df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket") self.assertEqual(count_bucketed_cols(["x"]), 1) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) # Test write two bucketing columns df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket") self.assertEqual(count_bucketed_cols(["x", "y"]), 2) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) # Test write with bucket and sort df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket") self.assertEqual(count_bucketed_cols(["x"]), 1) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) # Test write with a list of columns df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket") self.assertEqual(count_bucketed_cols(["x", "y"]), 2) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) # Test write with bucket and sort with a list of columns (df.write.bucketBy(2, "x") .sortBy(["y", "z"]) .mode("overwrite").saveAsTable("pyspark_bucket")) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) # Test write with bucket and sort with multiple columns (df.write.bucketBy(2, "x") .sortBy("y", "z") .mode("overwrite").saveAsTable("pyspark_bucket")) self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect())) def _to_pandas(self): from datetime import datetime, date schema = StructType().add("a", IntegerType()).add("b", StringType())\ .add("c", BooleanType()).add("d", FloatType())\ .add("dt", DateType()).add("ts", TimestampType()) data = [ (1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)), (2, "foo", True, 5.0, None, None), (3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)), (4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)), ] df = self.spark.createDataFrame(data, schema) return df.toPandas() @unittest.skipIf(not _have_pandas, _pandas_requirement_message) def test_to_pandas(self): import numpy as np pdf = self._to_pandas() types = pdf.dtypes self.assertEquals(types[0], np.int32) self.assertEquals(types[1], np.object) self.assertEquals(types[2], np.bool) self.assertEquals(types[3], np.float32) self.assertEquals(types[4], np.object) # datetime.date self.assertEquals(types[5], 'datetime64[ns]') @unittest.skipIf(_have_pandas, "Required Pandas was found.") def test_to_pandas_required_pandas_not_found(self): with QuietTest(self.sc): with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'): self._to_pandas() @unittest.skipIf(not _have_pandas, 
_pandas_requirement_message) def test_to_pandas_avoid_astype(self): import numpy as np schema = StructType().add("a", IntegerType()).add("b", StringType())\ .add("c", IntegerType()) data = [(1, "foo", 16777220), (None, "bar", None)] df = self.spark.createDataFrame(data, schema) types = df.toPandas().dtypes self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value. self.assertEquals(types[1], np.object) self.assertEquals(types[2], np.float64) def test_create_dataframe_from_array_of_long(self): import array data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))] df = self.spark.createDataFrame(data) self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807])) @unittest.skipIf(not _have_pandas, _pandas_requirement_message) def test_create_dataframe_from_pandas_with_timestamp(self): import pandas as pd from datetime import datetime pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)], "d": [pd.Timestamp.now().date()]}) # test types are inferred correctly without specifying schema df = self.spark.createDataFrame(pdf) self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType)) self.assertTrue(isinstance(df.schema['d'].dataType, DateType)) # test with schema will accept pdf as input df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp") self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType)) self.assertTrue(isinstance(df.schema['d'].dataType, DateType)) @unittest.skipIf(_have_pandas, "Required Pandas was found.") def test_create_dataframe_required_pandas_not_found(self): with QuietTest(self.sc): with self.assertRaisesRegexp( ImportError, "(Pandas >= .* must be installed|No module named '?pandas'?)"): import pandas as pd from datetime import datetime pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)], "d": [pd.Timestamp.now().date()]}) self.spark.createDataFrame(pdf) # Regression test for SPARK-23360 @unittest.skipIf(not _have_pandas, _pandas_requirement_message) def test_create_dateframe_from_pandas_with_dst(self): import pandas as pd from datetime import datetime pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]}) df = self.spark.createDataFrame(pdf) self.assertPandasEqual(pdf, df.toPandas()) orig_env_tz = os.environ.get('TZ', None) try: tz = 'America/Los_Angeles' os.environ['TZ'] = tz time.tzset() with self.sql_conf({'spark.sql.session.timeZone': tz}): df = self.spark.createDataFrame(pdf) self.assertPandasEqual(pdf, df.toPandas()) finally: del os.environ['TZ'] if orig_env_tz is not None: os.environ['TZ'] = orig_env_tz time.tzset() def test_sort_with_nulls_order(self): from pyspark.sql import functions df = self.spark.createDataFrame( [('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"]) self.assertEquals( df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(), [Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')]) self.assertEquals( df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(), [Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)]) self.assertEquals( df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(), [Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')]) self.assertEquals( df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(), [Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)]) def test_json_sampling_ratio(self): rdd = self.spark.sparkContext.range(0, 100, 1, 1) \ .map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x)) schema = 
self.spark.read.option('inferSchema', True) \ .option('samplingRatio', 0.5) \ .json(rdd).schema self.assertEquals(schema, StructType([StructField("a", LongType(), True)])) def test_csv_sampling_ratio(self): rdd = self.spark.sparkContext.range(0, 100, 1, 1) \ .map(lambda x: '0.1' if x == 1 else str(x)) schema = self.spark.read.option('inferSchema', True)\ .csv(rdd, samplingRatio=0.5).schema self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)])) class HiveSparkSubmitTests(SparkSubmitTests): def test_hivecontext(self): # This test checks that HiveContext is using Hive metastore (SPARK-16224). # It sets a metastore url and checks if there is a derby dir created by # Hive metastore. If this derby dir exists, HiveContext is using # Hive metastore. metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db") metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true" hive_site_dir = os.path.join(self.programDir, "conf") hive_site_file = self.createTempFile("hive-site.xml", (""" |<configuration> | <property> | <name>javax.jdo.option.ConnectionURL</name> | <value>%s</value> | </property> |</configuration> """ % metastore_URL).lstrip(), "conf") script = self.createTempFile("test.py", """ |import os | |from pyspark.conf import SparkConf |from pyspark.context import SparkContext |from pyspark.sql import HiveContext | |conf = SparkConf() |sc = SparkContext(conf=conf) |hive_context = HiveContext(sc) |print(hive_context.sql("show databases").collect()) """) proc = subprocess.Popen( [self.sparkSubmit, "--master", "local-cluster[1,1,1024]", "--driver-class-path", hive_site_dir, script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("default", out.decode('utf-8')) self.assertTrue(os.path.exists(metastore_path)) class SQLTests2(ReusedSQLTestCase): # We can't include this test into SQLTests because we will stop class's SparkContext and cause # other tests failed. def test_sparksession_with_stopped_sparkcontext(self): self.sc.stop() sc = SparkContext('local[4]', self.sc.appName) spark = SparkSession.builder.getOrCreate() try: df = spark.createDataFrame([(1, 2)], ["c", "c"]) df.collect() finally: spark.stop() sc.stop() class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils): # These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is # static and immutable. This can't be set or unset, for example, via `spark.conf`. @classmethod def setUpClass(cls): import glob from pyspark.find_spark_home import _find_spark_home SPARK_HOME = _find_spark_home() filename_pattern = ( "sql/core/target/scala-*/test-classes/org/apache/spark/sql/" "TestQueryExecutionListener.class") cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern))) if cls.has_listener: # Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration. cls.spark = SparkSession.builder \ .master("local[4]") \ .appName(cls.__name__) \ .config( "spark.sql.queryExecutionListeners", "org.apache.spark.sql.TestQueryExecutionListener") \ .getOrCreate() def setUp(self): if not self.has_listener: raise self.skipTest( "'org.apache.spark.sql.TestQueryExecutionListener' is not " "available. 
Will skip the related tests.") @classmethod def tearDownClass(cls): if hasattr(cls, "spark"): cls.spark.stop() def tearDown(self): self.spark._jvm.OnSuccessCall.clear() def test_query_execution_listener_on_collect(self): self.assertFalse( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should not be called before 'collect'") self.spark.sql("SELECT * FROM range(1)").collect() self.assertTrue( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should be called after 'collect'") @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) def test_query_execution_listener_on_collect_with_arrow(self): with self.sql_conf({"spark.sql.execution.arrow.enabled": True}): self.assertFalse( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should not be " "called before 'toPandas'") self.spark.sql("SELECT * FROM range(1)").toPandas() self.assertTrue( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should be called after 'toPandas'") class SparkSessionTests(PySparkTestCase): # This test is separate because it's closely related with session's start and stop. # See SPARK-23228. def test_set_jvm_default_session(self): spark = SparkSession.builder.getOrCreate() try: self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined()) finally: spark.stop() self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty()) def test_jvm_default_session_already_set(self): # Here, we assume there is the default session already set in JVM. jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc()) self.sc._jvm.SparkSession.setDefaultSession(jsession) spark = SparkSession.builder.getOrCreate() try: self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined()) # The session should be the same with the exiting one. self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get())) finally: spark.stop() class UDFInitializationTests(unittest.TestCase): def tearDown(self): if SparkSession._instantiatedSession is not None: SparkSession._instantiatedSession.stop() if SparkContext._active_spark_context is not None: SparkContext._active_spark_contex.stop() def test_udf_init_shouldnt_initalize_context(self): from pyspark.sql.functions import UserDefinedFunction UserDefinedFunction(lambda x: x, StringType()) self.assertIsNone( SparkContext._active_spark_context, "SparkContext shouldn't be initialized when UserDefinedFunction is created." ) self.assertIsNone( SparkSession._instantiatedSession, "SparkSession shouldn't be initialized when UserDefinedFunction is created." 
) class HiveContextSQLTests(ReusedPySparkTestCase): @classmethod def setUpClass(cls): ReusedPySparkTestCase.setUpClass() cls.tempdir = tempfile.NamedTemporaryFile(delete=False) cls.hive_available = True try: cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf() except py4j.protocol.Py4JError: cls.hive_available = False except TypeError: cls.hive_available = False os.unlink(cls.tempdir.name) if cls.hive_available: cls.spark = HiveContext._createForTesting(cls.sc) cls.testData = [Row(key=i, value=str(i)) for i in range(100)] cls.df = cls.sc.parallelize(cls.testData).toDF() def setUp(self): if not self.hive_available: self.skipTest("Hive is not available.") @classmethod def tearDownClass(cls): ReusedPySparkTestCase.tearDownClass() shutil.rmtree(cls.tempdir.name, ignore_errors=True) def test_save_and_load_table(self): df = self.df tmpPath = tempfile.mkdtemp() shutil.rmtree(tmpPath) df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath) actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json") self.assertEqual(sorted(df.collect()), sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect())) self.assertEqual(sorted(df.collect()), sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect())) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) self.spark.sql("DROP TABLE externalJsonTable") df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath) schema = StructType([StructField("value", StringType(), True)]) actual = self.spark.createExternalTable("externalJsonTable", source="json", schema=schema, path=tmpPath, noUse="this options will not be used") self.assertEqual(sorted(df.collect()), sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect())) self.assertEqual(sorted(df.select("value").collect()), sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect())) self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect())) self.spark.sql("DROP TABLE savedJsonTable") self.spark.sql("DROP TABLE externalJsonTable") defaultDataSourceName = self.spark.getConf("spark.sql.sources.default", "org.apache.spark.sql.parquet") self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json") df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite") actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath) self.assertEqual(sorted(df.collect()), sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect())) self.assertEqual(sorted(df.collect()), sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect())) self.assertEqual(sorted(df.collect()), sorted(actual.collect())) self.spark.sql("DROP TABLE savedJsonTable") self.spark.sql("DROP TABLE externalJsonTable") self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName) shutil.rmtree(tmpPath) def test_window_functions(self): df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"]) w = Window.partitionBy("value").orderBy("key") from pyspark.sql import functions as F sel = df.select(df.value, df.key, F.max("key").over(w.rowsBetween(0, 1)), F.min("key").over(w.rowsBetween(0, 1)), F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))), F.row_number().over(w), F.rank().over(w), F.dense_rank().over(w), F.ntile(2).over(w)) rs = sorted(sel.collect()) expected = [ ("1", 1, 1, 1, 1, 1, 1, 1, 1), ("2", 1, 1, 1, 3, 1, 1, 1, 1), ("2", 1, 2, 1, 3, 2, 1, 1, 1), ("2", 2, 2, 2, 3, 3, 3, 2, 2) ] for r, ex in zip(rs, expected): self.assertEqual(tuple(r), ex[:len(r)]) def 
test_window_functions_without_partitionBy(self): df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"]) w = Window.orderBy("key", df.value) from pyspark.sql import functions as F sel = df.select(df.value, df.key, F.max("key").over(w.rowsBetween(0, 1)), F.min("key").over(w.rowsBetween(0, 1)), F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))), F.row_number().over(w), F.rank().over(w), F.dense_rank().over(w), F.ntile(2).over(w)) rs = sorted(sel.collect()) expected = [ ("1", 1, 1, 1, 4, 1, 1, 1, 1), ("2", 1, 1, 1, 4, 2, 2, 2, 1), ("2", 1, 2, 1, 4, 3, 2, 2, 2), ("2", 2, 2, 2, 4, 4, 4, 3, 2) ] for r, ex in zip(rs, expected): self.assertEqual(tuple(r), ex[:len(r)]) def test_window_functions_cumulative_sum(self): df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"]) from pyspark.sql import functions as F # Test cumulative sum sel = df.select( df.key, F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0))) rs = sorted(sel.collect()) expected = [("one", 1), ("two", 3)] for r, ex in zip(rs, expected): self.assertEqual(tuple(r), ex[:len(r)]) # Test boundary values less than JVM's Long.MinValue and make sure we don't overflow sel = df.select( df.key, F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0))) rs = sorted(sel.collect()) expected = [("one", 1), ("two", 3)] for r, ex in zip(rs, expected): self.assertEqual(tuple(r), ex[:len(r)]) # Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow frame_end = Window.unboundedFollowing + 1 sel = df.select( df.key, F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end))) rs = sorted(sel.collect()) expected = [("one", 3), ("two", 2)] for r, ex in zip(rs, expected): self.assertEqual(tuple(r), ex[:len(r)]) def test_collect_functions(self): df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"]) from pyspark.sql import functions self.assertEqual( sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r), [1, 2]) self.assertEqual( sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r), [1, 1, 1, 2]) self.assertEqual( sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r), ["1", "2"]) self.assertEqual( sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r), ["1", "2", "2", "2"]) def test_limit_and_take(self): df = self.spark.range(1, 1000, numPartitions=10) def assert_runs_only_one_job_stage_and_task(job_group_name, f): tracker = self.sc.statusTracker() self.sc.setJobGroup(job_group_name, description="") f() jobs = tracker.getJobIdsForGroup(job_group_name) self.assertEqual(1, len(jobs)) stages = tracker.getJobInfo(jobs[0]).stageIds self.assertEqual(1, len(stages)) self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks) # Regression test for SPARK-10731: take should delegate to Scala implementation assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1)) # Regression test for SPARK-17514: limit(n).collect() should the perform same as take(n) assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect()) def test_datetime_functions(self): from pyspark.sql import functions from datetime import date, datetime df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol") parse_result = df.select(functions.to_date(functions.col("dateCol"))).first() self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)']) @unittest.skipIf(sys.version_info < (3, 
3), "Unittest < 3.3 doesn't support mocking") def test_unbounded_frames(self): from unittest.mock import patch from pyspark.sql import functions as F from pyspark.sql import window import importlib df = self.spark.range(0, 3) def rows_frame_match(): return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select( F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize)) ).columns[0] def range_frame_match(): return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select( F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize)) ).columns[0] with patch("sys.maxsize", 2 ** 31 - 1): importlib.reload(window) self.assertTrue(rows_frame_match()) self.assertTrue(range_frame_match()) with patch("sys.maxsize", 2 ** 63 - 1): importlib.reload(window) self.assertTrue(rows_frame_match()) self.assertTrue(range_frame_match()) with patch("sys.maxsize", 2 ** 127 - 1): importlib.reload(window) self.assertTrue(rows_frame_match()) self.assertTrue(range_frame_match()) importlib.reload(window) class DataTypeVerificationTests(unittest.TestCase): def test_verify_type_exception_msg(self): self.assertRaisesRegexp( ValueError, "test_name", lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None)) schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))]) self.assertRaisesRegexp( TypeError, "field b in field a", lambda: _make_type_verifier(schema)([["data"]])) def test_verify_type_ok_nullable(self): obj = None types = [IntegerType(), FloatType(), StringType(), StructType([])] for data_type in types: try: _make_type_verifier(data_type, nullable=True)(obj) except Exception: self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type)) def test_verify_type_not_nullable(self): import array import datetime import decimal schema = StructType([ StructField('s', StringType(), nullable=False), StructField('i', IntegerType(), nullable=True)]) class MyObj: def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) # obj, data_type success_spec = [ # String ("", StringType()), (u"", StringType()), (1, StringType()), (1.0, StringType()), ([], StringType()), ({}, StringType()), # UDT (ExamplePoint(1.0, 2.0), ExamplePointUDT()), # Boolean (True, BooleanType()), # Byte (-(2**7), ByteType()), (2**7 - 1, ByteType()), # Short (-(2**15), ShortType()), (2**15 - 1, ShortType()), # Integer (-(2**31), IntegerType()), (2**31 - 1, IntegerType()), # Long (2**64, LongType()), # Float & Double (1.0, FloatType()), (1.0, DoubleType()), # Decimal (decimal.Decimal("1.0"), DecimalType()), # Binary (bytearray([1, 2]), BinaryType()), # Date/Timestamp (datetime.date(2000, 1, 2), DateType()), (datetime.datetime(2000, 1, 2, 3, 4), DateType()), (datetime.datetime(2000, 1, 2, 3, 4), TimestampType()), # Array ([], ArrayType(IntegerType())), (["1", None], ArrayType(StringType(), containsNull=True)), ([1, 2], ArrayType(IntegerType())), ((1, 2), ArrayType(IntegerType())), (array.array('h', [1, 2]), ArrayType(IntegerType())), # Map ({}, MapType(StringType(), IntegerType())), ({"a": 1}, MapType(StringType(), IntegerType())), ({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)), # Struct ({"s": "a", "i": 1}, schema), ({"s": "a", "i": None}, schema), ({"s": "a"}, schema), ({"s": "a", "f": 1.0}, schema), (Row(s="a", i=1), schema), (Row(s="a", i=None), schema), (Row(s="a", i=1, f=1.0), schema), (["a", 1], schema), (["a", None], schema), (("a", 1), schema), (MyObj(s="a", i=1), schema), (MyObj(s="a", i=None), 
schema), (MyObj(s="a"), schema), ] # obj, data_type, exception class failure_spec = [ # String (match anything but None) (None, StringType(), ValueError), # UDT (ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError), # Boolean (1, BooleanType(), TypeError), ("True", BooleanType(), TypeError), ([1], BooleanType(), TypeError), # Byte (-(2**7) - 1, ByteType(), ValueError), (2**7, ByteType(), ValueError), ("1", ByteType(), TypeError), (1.0, ByteType(), TypeError), # Short (-(2**15) - 1, ShortType(), ValueError), (2**15, ShortType(), ValueError), # Integer (-(2**31) - 1, IntegerType(), ValueError), (2**31, IntegerType(), ValueError), # Float & Double (1, FloatType(), TypeError), (1, DoubleType(), TypeError), # Decimal (1.0, DecimalType(), TypeError), (1, DecimalType(), TypeError), ("1.0", DecimalType(), TypeError), # Binary (1, BinaryType(), TypeError), # Date/Timestamp ("2000-01-02", DateType(), TypeError), (946811040, TimestampType(), TypeError), # Array (["1", None], ArrayType(StringType(), containsNull=False), ValueError), ([1, "2"], ArrayType(IntegerType()), TypeError), # Map ({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError), ({"a": "1"}, MapType(StringType(), IntegerType()), TypeError), ({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False), ValueError), # Struct ({"s": "a", "i": "1"}, schema, TypeError), (Row(s="a"), schema, ValueError), # Row can't have missing field (Row(s="a", i="1"), schema, TypeError), (["a"], schema, ValueError), (["a", "1"], schema, TypeError), (MyObj(s="a", i="1"), schema, TypeError), (MyObj(s=None, i="1"), schema, ValueError), ] # Check success cases for obj, data_type in success_spec: try: _make_type_verifier(data_type, nullable=False)(obj) except Exception: self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type)) # Check failure cases for obj, data_type, exp in failure_spec: msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp) with self.assertRaises(exp, msg=msg): _make_type_verifier(data_type, nullable=False)(obj) @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) class ArrowTests(ReusedSQLTestCase): @classmethod def setUpClass(cls): from datetime import date, datetime from decimal import Decimal ReusedSQLTestCase.setUpClass() # Synchronize default timezone between Python and Java cls.tz_prev = os.environ.get("TZ", None) # save current tz if set tz = "America/Los_Angeles" os.environ["TZ"] = tz time.tzset() cls.spark.conf.set("spark.sql.session.timeZone", tz) cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true") # Disable fallback by default to easily detect the failures. 
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false") cls.schema = StructType([ StructField("1_str_t", StringType(), True), StructField("2_int_t", IntegerType(), True), StructField("3_long_t", LongType(), True), StructField("4_float_t", FloatType(), True), StructField("5_double_t", DoubleType(), True), StructField("6_decimal_t", DecimalType(38, 18), True), StructField("7_date_t", DateType(), True), StructField("8_timestamp_t", TimestampType(), True)]) cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"), date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)), (u"b", 2, 20, 0.4, 4.0, Decimal("4.0"), date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)), (u"c", 3, 30, 0.8, 6.0, Decimal("6.0"), date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))] @classmethod def tearDownClass(cls): del os.environ["TZ"] if cls.tz_prev is not None: os.environ["TZ"] = cls.tz_prev time.tzset() ReusedSQLTestCase.tearDownClass() def create_pandas_data_frame(self): import pandas as pd import numpy as np data_dict = {} for j, name in enumerate(self.schema.names): data_dict[name] = [self.data[i][j] for i in range(len(self.data))] # need to convert these to numpy types first data_dict["2_int_t"] = np.int32(data_dict["2_int_t"]) data_dict["4_float_t"] = np.float32(data_dict["4_float_t"]) return pd.DataFrame(data=data_dict) def test_toPandas_fallback_enabled(self): import pandas as pd with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}): schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)]) df = self.spark.createDataFrame([({u'a': 1},)], schema=schema) with QuietTest(self.sc): with warnings.catch_warnings(record=True) as warns: pdf = df.toPandas() # Catch and check the last UserWarning. user_warns = [ warn.message for warn in warns if isinstance(warn.message, UserWarning)] self.assertTrue(len(user_warns) > 0) self.assertTrue( "Attempting non-optimization" in _exception_message(user_warns[-1])) self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]})) def test_toPandas_fallback_disabled(self): schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)]) df = self.spark.createDataFrame([(None,)], schema=schema) with QuietTest(self.sc): with self.assertRaisesRegexp(Exception, 'Unsupported type'): df.toPandas() def test_null_conversion(self): df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] + self.data) pdf = df_null.toPandas() null_counts = pdf.isnull().sum().tolist() self.assertTrue(all([c == 1 for c in null_counts])) def _toPandas_arrow_toggle(self, df): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): pdf = df.toPandas() pdf_arrow = df.toPandas() return pdf, pdf_arrow def test_toPandas_arrow_toggle(self): df = self.spark.createDataFrame(self.data, schema=self.schema) pdf, pdf_arrow = self._toPandas_arrow_toggle(df) expected = self.create_pandas_data_frame() self.assertPandasEqual(expected, pdf) self.assertPandasEqual(expected, pdf_arrow) def test_toPandas_respect_session_timezone(self): df = self.spark.createDataFrame(self.data, schema=self.schema) timezone = "America/New_York" with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": False, "spark.sql.session.timeZone": timezone}): pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df) self.assertPandasEqual(pdf_arrow_la, pdf_la) with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": True, "spark.sql.session.timeZone": timezone}): pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df) 
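            # Editor's aside (illustrative, not part of the original test): the helper
            # `_toPandas_arrow_toggle` returns the plain (non-Arrow) conversion first and
            # the Arrow-backed conversion second, i.e. the same toggle pattern as:
            #
            #     with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
            #         pdf_plain = df.toPandas()   # row-at-a-time conversion
            #     pdf_arrow = df.toPandas()       # Arrow-accelerated conversion
            #
            # so pdf_ny / pdf_arrow_ny compare the two code paths under the same
            # "America/New_York" session time zone.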
self.assertPandasEqual(pdf_arrow_ny, pdf_ny) self.assertFalse(pdf_ny.equals(pdf_la)) from pyspark.sql.types import _check_series_convert_timestamps_local_tz pdf_la_corrected = pdf_la.copy() for field in self.schema: if isinstance(field.dataType, TimestampType): pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz( pdf_la_corrected[field.name], timezone) self.assertPandasEqual(pdf_ny, pdf_la_corrected) def test_pandas_round_trip(self): pdf = self.create_pandas_data_frame() df = self.spark.createDataFrame(self.data, schema=self.schema) pdf_arrow = df.toPandas() self.assertPandasEqual(pdf_arrow, pdf) def test_filtered_frame(self): df = self.spark.range(3).toDF("i") pdf = df.filter("i < 0").toPandas() self.assertEqual(len(pdf.columns), 1) self.assertEqual(pdf.columns[0], "i") self.assertTrue(pdf.empty) def _createDataFrame_toggle(self, pdf, schema=None): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): df_no_arrow = self.spark.createDataFrame(pdf, schema=schema) df_arrow = self.spark.createDataFrame(pdf, schema=schema) return df_no_arrow, df_arrow def test_createDataFrame_toggle(self): pdf = self.create_pandas_data_frame() df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema) self.assertEquals(df_no_arrow.collect(), df_arrow.collect()) def test_createDataFrame_respect_session_timezone(self): from datetime import timedelta pdf = self.create_pandas_data_frame() timezone = "America/New_York" with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": False, "spark.sql.session.timeZone": timezone}): df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema) result_la = df_no_arrow_la.collect() result_arrow_la = df_arrow_la.collect() self.assertEqual(result_la, result_arrow_la) with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": True, "spark.sql.session.timeZone": timezone}): df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema) result_ny = df_no_arrow_ny.collect() result_arrow_ny = df_arrow_ny.collect() self.assertEqual(result_ny, result_arrow_ny) self.assertNotEqual(result_ny, result_la) # Correct result_la by adjusting 3 hours difference between Los Angeles and New York result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v for k, v in row.asDict().items()}) for row in result_la] self.assertEqual(result_ny, result_la_corrected) def test_createDataFrame_with_schema(self): pdf = self.create_pandas_data_frame() df = self.spark.createDataFrame(pdf, schema=self.schema) self.assertEquals(self.schema, df.schema) pdf_arrow = df.toPandas() self.assertPandasEqual(pdf_arrow, pdf) def test_createDataFrame_with_incorrect_schema(self): pdf = self.create_pandas_data_frame() wrong_schema = StructType(list(reversed(self.schema))) with QuietTest(self.sc): with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"): self.spark.createDataFrame(pdf, schema=wrong_schema) def test_createDataFrame_with_names(self): pdf = self.create_pandas_data_frame() # Test that schema as a list of column names gets applied df = self.spark.createDataFrame(pdf, schema=list('abcdefgh')) self.assertEquals(df.schema.fieldNames(), list('abcdefgh')) # Test that schema as tuple of column names gets applied df = self.spark.createDataFrame(pdf, schema=tuple('abcdefgh')) self.assertEquals(df.schema.fieldNames(), list('abcdefgh')) def test_createDataFrame_column_name_encoding(self): import pandas as pd pdf = pd.DataFrame({u'a': [1]}) columns = 
self.spark.createDataFrame(pdf).columns self.assertTrue(isinstance(columns[0], str)) self.assertEquals(columns[0], 'a') columns = self.spark.createDataFrame(pdf, [u'b']).columns self.assertTrue(isinstance(columns[0], str)) self.assertEquals(columns[0], 'b') def test_createDataFrame_with_single_data_type(self): import pandas as pd with QuietTest(self.sc): with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"): self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int") def test_createDataFrame_does_not_modify_input(self): import pandas as pd # Some series get converted for Spark to consume, this makes sure input is unchanged pdf = self.create_pandas_data_frame() # Use a nanosecond value to make sure it is not truncated pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1) # Integers with nulls will get NaNs filled with 0 and will be casted pdf.ix[1, '2_int_t'] = None pdf_copy = pdf.copy(deep=True) self.spark.createDataFrame(pdf, schema=self.schema) self.assertTrue(pdf.equals(pdf_copy)) def test_schema_conversion_roundtrip(self): from pyspark.sql.types import from_arrow_schema, to_arrow_schema arrow_schema = to_arrow_schema(self.schema) schema_rt = from_arrow_schema(arrow_schema) self.assertEquals(self.schema, schema_rt) def test_createDataFrame_with_array_type(self): import pandas as pd pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]}) df, df_arrow = self._createDataFrame_toggle(pdf) result = df.collect() result_arrow = df_arrow.collect() expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)] for r in range(len(expected)): for e in range(len(expected[r])): self.assertTrue(expected[r][e] == result_arrow[r][e] and result[r][e] == result_arrow[r][e]) def test_toPandas_with_array_type(self): expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])] array_schema = StructType([StructField("a", ArrayType(IntegerType())), StructField("b", ArrayType(StringType()))]) df = self.spark.createDataFrame(expected, schema=array_schema) pdf, pdf_arrow = self._toPandas_arrow_toggle(df) result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)] result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)] for r in range(len(expected)): for e in range(len(expected[r])): self.assertTrue(expected[r][e] == result_arrow[r][e] and result[r][e] == result_arrow[r][e]) def test_createDataFrame_with_int_col_names(self): import numpy as np import pandas as pd pdf = pd.DataFrame(np.random.rand(4, 2)) df, df_arrow = self._createDataFrame_toggle(pdf) pdf_col_names = [str(c) for c in pdf.columns] self.assertEqual(pdf_col_names, df.columns) self.assertEqual(pdf_col_names, df_arrow.columns) def test_createDataFrame_fallback_enabled(self): import pandas as pd with QuietTest(self.sc): with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}): with warnings.catch_warnings(record=True) as warns: df = self.spark.createDataFrame( pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>") # Catch and check the last UserWarning. 
user_warns = [ warn.message for warn in warns if isinstance(warn.message, UserWarning)] self.assertTrue(len(user_warns) > 0) self.assertTrue( "Attempting non-optimization" in _exception_message(user_warns[-1])) self.assertEqual(df.collect(), [Row(a={u'a': 1})]) def test_createDataFrame_fallback_disabled(self): import pandas as pd with QuietTest(self.sc): with self.assertRaisesRegexp(TypeError, 'Unsupported type'): self.spark.createDataFrame( pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>") # Regression test for SPARK-23314 def test_timestamp_dst(self): import pandas as pd # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am dt = [datetime.datetime(2015, 11, 1, 0, 30), datetime.datetime(2015, 11, 1, 1, 30), datetime.datetime(2015, 11, 1, 2, 30)] pdf = pd.DataFrame({'time': dt}) df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time') df_from_pandas = self.spark.createDataFrame(pdf) self.assertPandasEqual(pdf, df_from_python.toPandas()) self.assertPandasEqual(pdf, df_from_pandas.toPandas()) @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) class PandasUDFTests(ReusedSQLTestCase): def test_pandas_udf_basic(self): from pyspark.rdd import PythonEvalType from pyspark.sql.functions import pandas_udf, PandasUDFType udf = pandas_udf(lambda x: x, DoubleType()) self.assertEqual(udf.returnType, DoubleType()) self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR) self.assertEqual(udf.returnType, DoubleType()) self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR) self.assertEqual(udf.returnType, DoubleType()) self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]), PandasUDFType.GROUPED_MAP) self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())])) self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP) self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())])) self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) udf = pandas_udf(lambda x: x, 'v double', functionType=PandasUDFType.GROUPED_MAP) self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())])) self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) udf = pandas_udf(lambda x: x, returnType='v double', functionType=PandasUDFType.GROUPED_MAP) self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())])) self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) def test_pandas_udf_decorator(self): from pyspark.rdd import PythonEvalType from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.types import StructType, StructField, DoubleType @pandas_udf(DoubleType()) def foo(x): return x self.assertEqual(foo.returnType, DoubleType()) self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) @pandas_udf(returnType=DoubleType()) def foo(x): return x self.assertEqual(foo.returnType, DoubleType()) self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) schema = StructType([StructField("v", DoubleType())]) @pandas_udf(schema, PandasUDFType.GROUPED_MAP) def foo(x): return x self.assertEqual(foo.returnType, schema) self.assertEqual(foo.evalType, 
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) @pandas_udf('v double', PandasUDFType.GROUPED_MAP) def foo(x): return x self.assertEqual(foo.returnType, schema) self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) @pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP) def foo(x): return x self.assertEqual(foo.returnType, schema) self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) @pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR) def foo(x): return x self.assertEqual(foo.returnType, DoubleType()) self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) @pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP) def foo(x): return x self.assertEqual(foo.returnType, schema) self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF) def test_udf_wrong_arg(self): from pyspark.sql.functions import pandas_udf, PandasUDFType with QuietTest(self.sc): with self.assertRaises(ParseException): @pandas_udf('blah') def foo(x): return x with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'): @pandas_udf(functionType=PandasUDFType.SCALAR) def foo(x): return x with self.assertRaisesRegexp(ValueError, 'Invalid functionType'): @pandas_udf('double', 100) def foo(x): return x with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'): pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR) with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'): @pandas_udf(LongType(), PandasUDFType.SCALAR) def zero_with_type(): return 1 with self.assertRaisesRegexp(TypeError, 'Invalid returnType'): @pandas_udf(returnType=PandasUDFType.GROUPED_MAP) def foo(df): return df with self.assertRaisesRegexp(TypeError, 'Invalid returnType'): @pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP) def foo(df): return df with self.assertRaisesRegexp(ValueError, 'Invalid function'): @pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP) def foo(k, v, w): return k @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) class ScalarPandasUDFTests(ReusedSQLTestCase): @classmethod def setUpClass(cls): ReusedSQLTestCase.setUpClass() # Synchronize default timezone between Python and Java cls.tz_prev = os.environ.get("TZ", None) # save current tz if set tz = "America/Los_Angeles" os.environ["TZ"] = tz time.tzset() cls.sc.environment["TZ"] = tz cls.spark.conf.set("spark.sql.session.timeZone", tz) @classmethod def tearDownClass(cls): del os.environ["TZ"] if cls.tz_prev is not None: os.environ["TZ"] = cls.tz_prev time.tzset() ReusedSQLTestCase.tearDownClass() @property def nondeterministic_vectorized_udf(self): from pyspark.sql.functions import pandas_udf @pandas_udf('double') def random_udf(v): import pandas as pd import numpy as np return pd.Series(np.random.random(len(v))) random_udf = random_udf.asNondeterministic() return random_udf def test_vectorized_udf_basic(self): from pyspark.sql.functions import pandas_udf, col, array df = self.spark.range(10).select( col('id').cast('string').alias('str'), col('id').cast('int').alias('int'), col('id').alias('long'), col('id').cast('float').alias('float'), col('id').cast('double').alias('double'), col('id').cast('decimal').alias('decimal'), col('id').cast('boolean').alias('bool'), array(col('id')).alias('array_long')) f = lambda x: x str_f = pandas_udf(f, StringType()) int_f = pandas_udf(f, IntegerType()) long_f = pandas_udf(f, LongType()) float_f = 
pandas_udf(f, FloatType()) double_f = pandas_udf(f, DoubleType()) decimal_f = pandas_udf(f, DecimalType()) bool_f = pandas_udf(f, BooleanType()) array_long_f = pandas_udf(f, ArrayType(LongType())) res = df.select(str_f(col('str')), int_f(col('int')), long_f(col('long')), float_f(col('float')), double_f(col('double')), decimal_f('decimal'), bool_f(col('bool')), array_long_f('array_long')) self.assertEquals(df.collect(), res.collect()) def test_register_nondeterministic_vectorized_udf_basic(self): from pyspark.sql.functions import pandas_udf from pyspark.rdd import PythonEvalType import random random_pandas_udf = pandas_udf( lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic() self.assertEqual(random_pandas_udf.deterministic, False) self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) nondeterministic_pandas_udf = self.spark.catalog.registerFunction( "randomPandasUDF", random_pandas_udf) self.assertEqual(nondeterministic_pandas_udf.deterministic, False) self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) [row] = self.spark.sql("SELECT randomPandasUDF(1)").collect() self.assertEqual(row[0], 7) def test_vectorized_udf_null_boolean(self): from pyspark.sql.functions import pandas_udf, col data = [(True,), (True,), (None,), (False,)] schema = StructType().add("bool", BooleanType()) df = self.spark.createDataFrame(data, schema) bool_f = pandas_udf(lambda x: x, BooleanType()) res = df.select(bool_f(col('bool'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_byte(self): from pyspark.sql.functions import pandas_udf, col data = [(None,), (2,), (3,), (4,)] schema = StructType().add("byte", ByteType()) df = self.spark.createDataFrame(data, schema) byte_f = pandas_udf(lambda x: x, ByteType()) res = df.select(byte_f(col('byte'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_short(self): from pyspark.sql.functions import pandas_udf, col data = [(None,), (2,), (3,), (4,)] schema = StructType().add("short", ShortType()) df = self.spark.createDataFrame(data, schema) short_f = pandas_udf(lambda x: x, ShortType()) res = df.select(short_f(col('short'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_int(self): from pyspark.sql.functions import pandas_udf, col data = [(None,), (2,), (3,), (4,)] schema = StructType().add("int", IntegerType()) df = self.spark.createDataFrame(data, schema) int_f = pandas_udf(lambda x: x, IntegerType()) res = df.select(int_f(col('int'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_long(self): from pyspark.sql.functions import pandas_udf, col data = [(None,), (2,), (3,), (4,)] schema = StructType().add("long", LongType()) df = self.spark.createDataFrame(data, schema) long_f = pandas_udf(lambda x: x, LongType()) res = df.select(long_f(col('long'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_float(self): from pyspark.sql.functions import pandas_udf, col data = [(3.0,), (5.0,), (-1.0,), (None,)] schema = StructType().add("float", FloatType()) df = self.spark.createDataFrame(data, schema) float_f = pandas_udf(lambda x: x, FloatType()) res = df.select(float_f(col('float'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_double(self): from pyspark.sql.functions import pandas_udf, col data = [(3.0,), (5.0,), (-1.0,), (None,)] schema = StructType().add("double", DoubleType()) df = self.spark.createDataFrame(data, schema) 
double_f = pandas_udf(lambda x: x, DoubleType()) res = df.select(double_f(col('double'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_decimal(self): from decimal import Decimal from pyspark.sql.functions import pandas_udf, col data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)] schema = StructType().add("decimal", DecimalType(38, 18)) df = self.spark.createDataFrame(data, schema) decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18)) res = df.select(decimal_f(col('decimal'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_null_string(self): from pyspark.sql.functions import pandas_udf, col data = [("foo",), (None,), ("bar",), ("bar",)] schema = StructType().add("str", StringType()) df = self.spark.createDataFrame(data, schema) str_f = pandas_udf(lambda x: x, StringType()) res = df.select(str_f(col('str'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_string_in_udf(self): from pyspark.sql.functions import pandas_udf, col import pandas as pd df = self.spark.range(10) str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType()) actual = df.select(str_f(col('id'))) expected = df.select(col('id').cast('string')) self.assertEquals(expected.collect(), actual.collect()) def test_vectorized_udf_datatype_string(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10).select( col('id').cast('string').alias('str'), col('id').cast('int').alias('int'), col('id').alias('long'), col('id').cast('float').alias('float'), col('id').cast('double').alias('double'), col('id').cast('decimal').alias('decimal'), col('id').cast('boolean').alias('bool')) f = lambda x: x str_f = pandas_udf(f, 'string') int_f = pandas_udf(f, 'integer') long_f = pandas_udf(f, 'long') float_f = pandas_udf(f, 'float') double_f = pandas_udf(f, 'double') decimal_f = pandas_udf(f, 'decimal(38, 18)') bool_f = pandas_udf(f, 'boolean') res = df.select(str_f(col('str')), int_f(col('int')), long_f(col('long')), float_f(col('float')), double_f(col('double')), decimal_f('decimal'), bool_f(col('bool'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_array_type(self): from pyspark.sql.functions import pandas_udf, col data = [([1, 2],), ([3, 4],)] array_schema = StructType([StructField("array", ArrayType(IntegerType()))]) df = self.spark.createDataFrame(data, schema=array_schema) array_f = pandas_udf(lambda x: x, ArrayType(IntegerType())) result = df.select(array_f(col('array'))) self.assertEquals(df.collect(), result.collect()) def test_vectorized_udf_null_array(self): from pyspark.sql.functions import pandas_udf, col data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)] array_schema = StructType([StructField("array", ArrayType(IntegerType()))]) df = self.spark.createDataFrame(data, schema=array_schema) array_f = pandas_udf(lambda x: x, ArrayType(IntegerType())) result = df.select(array_f(col('array'))) self.assertEquals(df.collect(), result.collect()) def test_vectorized_udf_complex(self): from pyspark.sql.functions import pandas_udf, col, expr df = self.spark.range(10).select( col('id').cast('int').alias('a'), col('id').cast('int').alias('b'), col('id').cast('double').alias('c')) add = pandas_udf(lambda x, y: x + y, IntegerType()) power2 = pandas_udf(lambda x: 2 ** x, IntegerType()) mul = pandas_udf(lambda x, y: x * y, DoubleType()) res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c'))) expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c')) 
self.assertEquals(expected.collect(), res.collect()) def test_vectorized_udf_exception(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10) raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType()) with QuietTest(self.sc): with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'): df.select(raise_exception(col('id'))).collect() def test_vectorized_udf_invalid_length(self): from pyspark.sql.functions import pandas_udf, col import pandas as pd df = self.spark.range(10) raise_exception = pandas_udf(lambda _: pd.Series(1), LongType()) with QuietTest(self.sc): with self.assertRaisesRegexp( Exception, 'Result vector from pandas_udf was not the required length'): df.select(raise_exception(col('id'))).collect() def test_vectorized_udf_mix_udf(self): from pyspark.sql.functions import pandas_udf, udf, col df = self.spark.range(10) row_by_row_udf = udf(lambda x: x, LongType()) pd_udf = pandas_udf(lambda x: x, LongType()) with QuietTest(self.sc): with self.assertRaisesRegexp( Exception, 'Can not mix vectorized and non-vectorized UDFs'): df.select(row_by_row_udf(col('id')), pd_udf(col('id'))).collect() def test_vectorized_udf_chained(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10) f = pandas_udf(lambda x: x + 1, LongType()) g = pandas_udf(lambda x: x - 1, LongType()) res = df.select(g(f(col('id')))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_wrong_return_type(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10) with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*scalar Pandas UDF.*MapType'): pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType())) def test_vectorized_udf_return_scalar(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10) f = pandas_udf(lambda x: 1.0, DoubleType()) with QuietTest(self.sc): with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'): df.select(f(col('id'))).collect() def test_vectorized_udf_decorator(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.range(10) @pandas_udf(returnType=LongType()) def identity(x): return x res = df.select(identity(col('id'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_empty_partition(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2)) f = pandas_udf(lambda x: x, LongType()) res = df.select(f(col('id'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_varargs(self): from pyspark.sql.functions import pandas_udf, col df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2)) f = pandas_udf(lambda *v: v[0], LongType()) res = df.select(f(col('id'))) self.assertEquals(df.collect(), res.collect()) def test_vectorized_udf_unsupported_types(self): from pyspark.sql.functions import pandas_udf with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*scalar Pandas UDF.*MapType'): pandas_udf(lambda x: x, MapType(StringType(), IntegerType())) with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*scalar Pandas UDF.*BinaryType'): pandas_udf(lambda x: x, BinaryType()) def test_vectorized_udf_dates(self): from pyspark.sql.functions import pandas_udf, col from datetime import date schema = StructType().add("idx", LongType()).add("date", DateType()) data = [(0, date(1969, 1, 1),), (1, date(2012, 2, 
2),), (2, None,), (3, date(2100, 4, 4),)] df = self.spark.createDataFrame(data, schema=schema) date_copy = pandas_udf(lambda t: t, returnType=DateType()) df = df.withColumn("date_copy", date_copy(col("date"))) @pandas_udf(returnType=StringType()) def check_data(idx, date, date_copy): import pandas as pd msgs = [] is_equal = date.isnull() for i in range(len(idx)): if (is_equal[i] and data[idx[i]][1] is None) or \ date[i] == data[idx[i]][1]: msgs.append(None) else: msgs.append( "date values are not equal (date='%s': data[%d][1]='%s')" % (date[i], idx[i], data[idx[i]][1])) return pd.Series(msgs) result = df.withColumn("check_data", check_data(col("idx"), col("date"), col("date_copy"))).collect() self.assertEquals(len(data), len(result)) for i in range(len(result)): self.assertEquals(data[i][1], result[i][1]) # "date" col self.assertEquals(data[i][1], result[i][2]) # "date_copy" col self.assertIsNone(result[i][3]) # "check_data" col def test_vectorized_udf_timestamps(self): from pyspark.sql.functions import pandas_udf, col from datetime import datetime schema = StructType([ StructField("idx", LongType(), True), StructField("timestamp", TimestampType(), True)]) data = [(0, datetime(1969, 1, 1, 1, 1, 1)), (1, datetime(2012, 2, 2, 2, 2, 2)), (2, None), (3, datetime(2100, 3, 3, 3, 3, 3))] df = self.spark.createDataFrame(data, schema=schema) # Check that a timestamp passed through a pandas_udf will not be altered by timezone calc f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType()) df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp"))) @pandas_udf(returnType=StringType()) def check_data(idx, timestamp, timestamp_copy): import pandas as pd msgs = [] is_equal = timestamp.isnull() # use this array to check values are equal for i in range(len(idx)): # Check that timestamps are as expected in the UDF if (is_equal[i] and data[idx[i]][1] is None) or \ timestamp[i].to_pydatetime() == data[idx[i]][1]: msgs.append(None) else: msgs.append( "timestamp values are not equal (timestamp='%s': data[%d][1]='%s')" % (timestamp[i], idx[i], data[idx[i]][1])) return pd.Series(msgs) result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"), col("timestamp_copy"))).collect() # Check that collection values are correct self.assertEquals(len(data), len(result)) for i in range(len(result)): self.assertEquals(data[i][1], result[i][1]) # "timestamp" col self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col self.assertIsNone(result[i][3]) # "check_data" col def test_vectorized_udf_return_timestamp_tz(self): from pyspark.sql.functions import pandas_udf, col import pandas as pd df = self.spark.range(10) @pandas_udf(returnType=TimestampType()) def gen_timestamps(id): ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id] return pd.Series(ts) result = df.withColumn("ts", gen_timestamps(col("id"))).collect() spark_ts_t = TimestampType() for r in result: i, ts = r ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime() expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz)) self.assertEquals(expected, ts) def test_vectorized_udf_check_config(self): from pyspark.sql.functions import pandas_udf, col import pandas as pd with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}): df = self.spark.range(10, numPartitions=1) @pandas_udf(returnType=LongType()) def check_records_per_batch(x): return pd.Series(x.size).repeat(x.size) result = df.select(check_records_per_batch(col("id"))).collect() for (r,) in result: 
self.assertTrue(r <= 3) def test_vectorized_udf_timestamps_respect_session_timezone(self): from pyspark.sql.functions import pandas_udf, col from datetime import datetime import pandas as pd schema = StructType([ StructField("idx", LongType(), True), StructField("timestamp", TimestampType(), True)]) data = [(1, datetime(1969, 1, 1, 1, 1, 1)), (2, datetime(2012, 2, 2, 2, 2, 2)), (3, None), (4, datetime(2100, 3, 3, 3, 3, 3))] df = self.spark.createDataFrame(data, schema=schema) f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType()) internal_value = pandas_udf( lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType()) timezone = "America/New_York" with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": False, "spark.sql.session.timeZone": timezone}): df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \ .withColumn("internal_value", internal_value(col("timestamp"))) result_la = df_la.select(col("idx"), col("internal_value")).collect() # Correct result_la by adjusting 3 hours difference between Los Angeles and New York diff = 3 * 60 * 60 * 1000 * 1000 * 1000 result_la_corrected = \ df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect() with self.sql_conf({ "spark.sql.execution.pandas.respectSessionTimeZone": True, "spark.sql.session.timeZone": timezone}): df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \ .withColumn("internal_value", internal_value(col("timestamp"))) result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect() self.assertNotEqual(result_ny, result_la) self.assertEqual(result_ny, result_la_corrected) def test_nondeterministic_vectorized_udf(self): # Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations from pyspark.sql.functions import udf, pandas_udf, col @pandas_udf('double') def plus_ten(v): return v + 10 random_udf = self.nondeterministic_vectorized_udf df = self.spark.range(10).withColumn('rand', random_udf(col('id'))) result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas() self.assertEqual(random_udf.deterministic, False) self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10)) def test_nondeterministic_vectorized_udf_in_aggregate(self): from pyspark.sql.functions import pandas_udf, sum df = self.spark.range(10) random_udf = self.nondeterministic_vectorized_udf with QuietTest(self.sc): with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'): df.groupby(df.id).agg(sum(random_udf(df.id))).collect() with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'): df.agg(sum(random_udf(df.id))).collect() def test_register_vectorized_udf_basic(self): from pyspark.rdd import PythonEvalType from pyspark.sql.functions import pandas_udf, col, expr df = self.spark.range(10).select( col('id').cast('int').alias('a'), col('id').cast('int').alias('b')) original_add = pandas_udf(lambda x, y: x + y, IntegerType()) self.assertEqual(original_add.deterministic, True) self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF) new_add = self.spark.catalog.registerFunction("add1", original_add) res1 = df.select(new_add(col('a'), col('b'))) res2 = self.spark.sql( "SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t") expected = df.select(expr('a + b')) self.assertEquals(expected.collect(), res1.collect()) self.assertEquals(expected.collect(), res2.collect()) # Regression test for SPARK-23314 def test_timestamp_dst(self): from pyspark.sql.functions 
import pandas_udf # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am dt = [datetime.datetime(2015, 11, 1, 0, 30), datetime.datetime(2015, 11, 1, 1, 30), datetime.datetime(2015, 11, 1, 2, 30)] df = self.spark.createDataFrame(dt, 'timestamp').toDF('time') foo_udf = pandas_udf(lambda x: x, 'timestamp') result = df.withColumn('time', foo_udf(df.time)) self.assertEquals(df.collect(), result.collect()) @unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.") def test_type_annotation(self): from pyspark.sql.functions import pandas_udf # Regression test to check if type hints can be used. See SPARK-23569. # Note that it throws an error during compilation in lower Python versions if 'exec' # is not used. Also, note that we explicitly use another dictionary to avoid modifications # in the current 'locals()'. # # Hyukjin: I think it's an ugly way to test issues about syntax specific in # higher versions of Python, which we shouldn't encourage. This was the last resort # I could come up with at that time. _locals = {} exec( "import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col", _locals) df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id')) self.assertEqual(df.first()[0], 0) @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) class GroupedMapPandasUDFTests(ReusedSQLTestCase): @property def data(self): from pyspark.sql.functions import array, explode, col, lit return self.spark.range(10).toDF('id') \ .withColumn("vs", array([lit(i) for i in range(20, 30)])) \ .withColumn("v", explode(col('vs'))).drop('vs') def test_supported_types(self): from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col df = self.data.withColumn("arr", array(col("id"))) # Different forms of group map pandas UDF, results of these are the same output_schema = StructType( [StructField('id', LongType()), StructField('v', IntegerType()), StructField('arr', ArrayType(LongType())), StructField('v1', DoubleType()), StructField('v2', LongType())]) udf1 = pandas_udf( lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id), output_schema, PandasUDFType.GROUPED_MAP ) udf2 = pandas_udf( lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id), output_schema, PandasUDFType.GROUPED_MAP ) udf3 = pandas_udf( lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id), output_schema, PandasUDFType.GROUPED_MAP ) result1 = df.groupby('id').apply(udf1).sort('id').toPandas() expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True) result2 = df.groupby('id').apply(udf2).sort('id').toPandas() expected2 = expected1 result3 = df.groupby('id').apply(udf3).sort('id').toPandas() expected3 = expected1 self.assertPandasEqual(expected1, result1) self.assertPandasEqual(expected2, result2) self.assertPandasEqual(expected3, result3) def test_register_grouped_map_udf(self): from pyspark.sql.functions import pandas_udf, PandasUDFType foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP) with QuietTest(self.sc): with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or ' 'SQL_SCALAR_PANDAS_UDF'): self.spark.catalog.registerFunction("foo_udf", foo_udf) def test_decorator(self): from pyspark.sql.functions import pandas_udf, PandasUDFType df = self.data @pandas_udf( 'id long, v int, v1 double, v2 long', PandasUDFType.GROUPED_MAP ) def foo(pdf): return pdf.assign(v1=pdf.v * 
pdf.id * 1.0, v2=pdf.v + pdf.id) result = df.groupby('id').apply(foo).sort('id').toPandas() expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True) self.assertPandasEqual(expected, result) def test_coerce(self): from pyspark.sql.functions import pandas_udf, PandasUDFType df = self.data foo = pandas_udf( lambda pdf: pdf, 'id long, v double', PandasUDFType.GROUPED_MAP ) result = df.groupby('id').apply(foo).sort('id').toPandas() expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True) expected = expected.assign(v=expected.v.astype('float64')) self.assertPandasEqual(expected, result) def test_complex_groupby(self): from pyspark.sql.functions import pandas_udf, col, PandasUDFType df = self.data @pandas_udf( 'id long, v int, norm double', PandasUDFType.GROUPED_MAP ) def normalize(pdf): v = pdf.v return pdf.assign(norm=(v - v.mean()) / v.std()) result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas() pdf = df.toPandas() expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func) expected = expected.sort_values(['id', 'v']).reset_index(drop=True) expected = expected.assign(norm=expected.norm.astype('float64')) self.assertPandasEqual(expected, result) def test_empty_groupby(self): from pyspark.sql.functions import pandas_udf, col, PandasUDFType df = self.data @pandas_udf( 'id long, v int, norm double', PandasUDFType.GROUPED_MAP ) def normalize(pdf): v = pdf.v return pdf.assign(norm=(v - v.mean()) / v.std()) result = df.groupby().apply(normalize).sort('id', 'v').toPandas() pdf = df.toPandas() expected = normalize.func(pdf) expected = expected.sort_values(['id', 'v']).reset_index(drop=True) expected = expected.assign(norm=expected.norm.astype('float64')) self.assertPandasEqual(expected, result) def test_datatype_string(self): from pyspark.sql.functions import pandas_udf, PandasUDFType df = self.data foo_udf = pandas_udf( lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id), 'id long, v int, v1 double, v2 long', PandasUDFType.GROUPED_MAP ) result = df.groupby('id').apply(foo_udf).sort('id').toPandas() expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True) self.assertPandasEqual(expected, result) def test_wrong_return_type(self): from pyspark.sql.functions import pandas_udf, PandasUDFType with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*grouped map Pandas UDF.*MapType'): pandas_udf( lambda pdf: pdf, 'id long, v map<int, int>', PandasUDFType.GROUPED_MAP) def test_wrong_args(self): from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType df = self.data with QuietTest(self.sc): with self.assertRaisesRegexp(ValueError, 'Invalid udf'): df.groupby('id').apply(lambda x: x) with self.assertRaisesRegexp(ValueError, 'Invalid udf'): df.groupby('id').apply(udf(lambda x: x, DoubleType())) with self.assertRaisesRegexp(ValueError, 'Invalid udf'): df.groupby('id').apply(sum(df.v)) with self.assertRaisesRegexp(ValueError, 'Invalid udf'): df.groupby('id').apply(df.v + 1) with self.assertRaisesRegexp(ValueError, 'Invalid function'): df.groupby('id').apply( pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())]))) with self.assertRaisesRegexp(ValueError, 'Invalid udf'): df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType())) with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'): df.groupby('id').apply( pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR)) def test_unsupported_types(self): from 
pyspark.sql.functions import pandas_udf, PandasUDFType schema = StructType( [StructField("id", LongType(), True), StructField("map", MapType(StringType(), IntegerType()), True)]) with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*grouped map Pandas UDF.*MapType'): pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP) schema = StructType( [StructField("id", LongType(), True), StructField("arr_ts", ArrayType(TimestampType()), True)]) with QuietTest(self.sc): with self.assertRaisesRegexp( NotImplementedError, 'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'): pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP) # Regression test for SPARK-23314 def test_timestamp_dst(self): from pyspark.sql.functions import pandas_udf, PandasUDFType # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am dt = [datetime.datetime(2015, 11, 1, 0, 30), datetime.datetime(2015, 11, 1, 1, 30), datetime.datetime(2015, 11, 1, 2, 30)] df = self.spark.createDataFrame(dt, 'timestamp').toDF('time') foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP) result = df.groupby('time').apply(foo_udf).sort('time') self.assertPandasEqual(df.toPandas(), result.toPandas()) def test_udf_with_key(self): from pyspark.sql.functions import pandas_udf, col, PandasUDFType df = self.data pdf = df.toPandas() def foo1(key, pdf): import numpy as np assert type(key) == tuple assert type(key[0]) == np.int64 return pdf.assign(v1=key[0], v2=pdf.v * key[0], v3=pdf.v * pdf.id, v4=pdf.v * pdf.id.mean()) def foo2(key, pdf): import numpy as np assert type(key) == tuple assert type(key[0]) == np.int64 assert type(key[1]) == np.int32 return pdf.assign(v1=key[0], v2=key[1], v3=pdf.v * key[0], v4=pdf.v + key[1]) def foo3(key, pdf): assert type(key) == tuple assert len(key) == 0 return pdf.assign(v1=pdf.v * pdf.id) # v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32> # v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64> udf1 = pandas_udf( foo1, 'id long, v int, v1 long, v2 int, v3 long, v4 double', PandasUDFType.GROUPED_MAP) udf2 = pandas_udf( foo2, 'id long, v int, v1 long, v2 int, v3 int, v4 int', PandasUDFType.GROUPED_MAP) udf3 = pandas_udf( foo3, 'id long, v int, v1 long', PandasUDFType.GROUPED_MAP) # Test groupby column result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas() expected1 = pdf.groupby('id')\ .apply(lambda x: udf1.func((x.id.iloc[0],), x))\ .sort_values(['id', 'v']).reset_index(drop=True) self.assertPandasEqual(expected1, result1) # Test groupby expression result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas() expected2 = pdf.groupby(pdf.id % 2)\ .apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\ .sort_values(['id', 'v']).reset_index(drop=True) self.assertPandasEqual(expected2, result2) # Test complex groupby result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas() expected3 = pdf.groupby([pdf.id, pdf.v % 2])\ .apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\ .sort_values(['id', 'v']).reset_index(drop=True) self.assertPandasEqual(expected3, result3) # Test empty groupby result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas() expected4 = udf3.func((), pdf) self.assertPandasEqual(expected4, result4) @unittest.skipIf( not _have_pandas or not _have_pyarrow, _pandas_requirement_message or _pyarrow_requirement_message) class GroupedAggPandasUDFTests(ReusedSQLTestCase): @property def data(self): 
from pyspark.sql.functions import array, explode, col, lit return self.spark.range(10).toDF('id') \ .withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \ .withColumn("v", explode(col('vs'))) \ .drop('vs') \ .withColumn('w', lit(1.0)) @property def python_plus_one(self): from pyspark.sql.functions import udf @udf('double') def plus_one(v): assert isinstance(v, (int, float)) return v + 1 return plus_one @property def pandas_scalar_plus_two(self): import pandas as pd from pyspark.sql.functions import pandas_udf, PandasUDFType @pandas_udf('double', PandasUDFType.SCALAR) def plus_two(v): assert isinstance(v, pd.Series) return v + 2 return plus_two @property def pandas_agg_mean_udf(self): from pyspark.sql.functions import pandas_udf, PandasUDFType @pandas_udf('double', PandasUDFType.GROUPED_AGG) def avg(v): return v.mean() return avg @property def pandas_agg_sum_udf(self): from pyspark.sql.functions import pandas_udf, PandasUDFType @pandas_udf('double', PandasUDFType.GROUPED_AGG) def sum(v): return v.sum() return sum @property def pandas_agg_weighted_mean_udf(self): import numpy as np from pyspark.sql.functions import pandas_udf, PandasUDFType @pandas_udf('double', PandasUDFType.GROUPED_AGG) def weighted_mean(v, w): return np.average(v, weights=w) return weighted_mean def test_manual(self): from pyspark.sql.functions import pandas_udf, array df = self.data sum_udf = self.pandas_agg_sum_udf mean_udf = self.pandas_agg_mean_udf mean_arr_udf = pandas_udf( self.pandas_agg_mean_udf.func, ArrayType(self.pandas_agg_mean_udf.returnType), self.pandas_agg_mean_udf.evalType) result1 = df.groupby('id').agg( sum_udf(df.v), mean_udf(df.v), mean_arr_udf(array(df.v))).sort('id') expected1 = self.spark.createDataFrame( [[0, 245.0, 24.5, [24.5]], [1, 255.0, 25.5, [25.5]], [2, 265.0, 26.5, [26.5]], [3, 275.0, 27.5, [27.5]], [4, 285.0, 28.5, [28.5]], [5, 295.0, 29.5, [29.5]], [6, 305.0, 30.5, [30.5]], [7, 315.0, 31.5, [31.5]], [8, 325.0, 32.5, [32.5]], [9, 335.0, 33.5, [33.5]]], ['id', 'sum(v)', 'avg(v)', 'avg(array(v))']) self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) def test_basic(self): from pyspark.sql.functions import col, lit, sum, mean df = self.data weighted_mean_udf = self.pandas_agg_weighted_mean_udf # Groupby one column and aggregate one UDF with literal result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id') expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id') self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) # Groupby one expression and aggregate one UDF with literal result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\ .sort(df.id + 1) expected2 = df.groupby((col('id') + 1))\ .agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1) self.assertPandasEqual(expected2.toPandas(), result2.toPandas()) # Groupby one column and aggregate one UDF without literal result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id') expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id') self.assertPandasEqual(expected3.toPandas(), result3.toPandas()) # Groupby one expression and aggregate one UDF without literal result4 = df.groupby((col('id') + 1).alias('id'))\ .agg(weighted_mean_udf(df.v, df.w))\ .sort('id') expected4 = df.groupby((col('id') + 1).alias('id'))\ .agg(mean(df.v).alias('weighted_mean(v, w)'))\ .sort('id') self.assertPandasEqual(expected4.toPandas(), result4.toPandas()) def test_unsupported_types(self): from pyspark.sql.types 
import DoubleType, MapType from pyspark.sql.functions import pandas_udf, PandasUDFType with QuietTest(self.sc): with self.assertRaisesRegexp(NotImplementedError, 'not supported'): pandas_udf( lambda x: x, ArrayType(ArrayType(TimestampType())), PandasUDFType.GROUPED_AGG) with QuietTest(self.sc): with self.assertRaisesRegexp(NotImplementedError, 'not supported'): @pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG) def mean_and_std_udf(v): return v.mean(), v.std() with QuietTest(self.sc): with self.assertRaisesRegexp(NotImplementedError, 'not supported'): @pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG) def mean_and_std_udf(v): return {v.mean(): v.std()} def test_alias(self): from pyspark.sql.functions import mean df = self.data mean_udf = self.pandas_agg_mean_udf result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias')) expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias')) self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) def test_mixed_sql(self): """ Test mixing group aggregate pandas UDF with sql expression. """ from pyspark.sql.functions import sum, mean df = self.data sum_udf = self.pandas_agg_sum_udf # Mix group aggregate pandas UDF with sql expression result1 = (df.groupby('id') .agg(sum_udf(df.v) + 1) .sort('id')) expected1 = (df.groupby('id') .agg(sum(df.v) + 1) .sort('id')) # Mix group aggregate pandas UDF with sql expression (order swapped) result2 = (df.groupby('id') .agg(sum_udf(df.v + 1)) .sort('id')) expected2 = (df.groupby('id') .agg(sum(df.v + 1)) .sort('id')) # Wrap group aggregate pandas UDF with two sql expressions result3 = (df.groupby('id') .agg(sum_udf(df.v + 1) + 2) .sort('id')) expected3 = (df.groupby('id') .agg(sum(df.v + 1) + 2) .sort('id')) self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) self.assertPandasEqual(expected2.toPandas(), result2.toPandas()) self.assertPandasEqual(expected3.toPandas(), result3.toPandas()) def test_mixed_udfs(self): """ Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF. 
""" from pyspark.sql.functions import sum, mean df = self.data plus_one = self.python_plus_one plus_two = self.pandas_scalar_plus_two sum_udf = self.pandas_agg_sum_udf # Mix group aggregate pandas UDF and python UDF result1 = (df.groupby('id') .agg(plus_one(sum_udf(df.v))) .sort('id')) expected1 = (df.groupby('id') .agg(plus_one(sum(df.v))) .sort('id')) # Mix group aggregate pandas UDF and python UDF (order swapped) result2 = (df.groupby('id') .agg(sum_udf(plus_one(df.v))) .sort('id')) expected2 = (df.groupby('id') .agg(sum(plus_one(df.v))) .sort('id')) # Mix group aggregate pandas UDF and scalar pandas UDF result3 = (df.groupby('id') .agg(sum_udf(plus_two(df.v))) .sort('id')) expected3 = (df.groupby('id') .agg(sum(plus_two(df.v))) .sort('id')) # Mix group aggregate pandas UDF and scalar pandas UDF (order swapped) result4 = (df.groupby('id') .agg(plus_two(sum_udf(df.v))) .sort('id')) expected4 = (df.groupby('id') .agg(plus_two(sum(df.v))) .sort('id')) # Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby result5 = (df.groupby(plus_one(df.id)) .agg(plus_one(sum_udf(plus_one(df.v)))) .sort('plus_one(id)')) expected5 = (df.groupby(plus_one(df.id)) .agg(plus_one(sum(plus_one(df.v)))) .sort('plus_one(id)')) # Wrap group aggregate pandas UDF with two scala pandas UDF and user scala pandas UDF in # groupby result6 = (df.groupby(plus_two(df.id)) .agg(plus_two(sum_udf(plus_two(df.v)))) .sort('plus_two(id)')) expected6 = (df.groupby(plus_two(df.id)) .agg(plus_two(sum(plus_two(df.v)))) .sort('plus_two(id)')) self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) self.assertPandasEqual(expected2.toPandas(), result2.toPandas()) self.assertPandasEqual(expected3.toPandas(), result3.toPandas()) self.assertPandasEqual(expected4.toPandas(), result4.toPandas()) self.assertPandasEqual(expected5.toPandas(), result5.toPandas()) self.assertPandasEqual(expected6.toPandas(), result6.toPandas()) def test_multiple_udfs(self): """ Test multiple group aggregate pandas UDFs in one agg function. 
""" from pyspark.sql.functions import col, lit, sum, mean df = self.data mean_udf = self.pandas_agg_mean_udf sum_udf = self.pandas_agg_sum_udf weighted_mean_udf = self.pandas_agg_weighted_mean_udf result1 = (df.groupBy('id') .agg(mean_udf(df.v), sum_udf(df.v), weighted_mean_udf(df.v, df.w)) .sort('id') .toPandas()) expected1 = (df.groupBy('id') .agg(mean(df.v), sum(df.v), mean(df.v).alias('weighted_mean(v, w)')) .sort('id') .toPandas()) self.assertPandasEqual(expected1, result1) def test_complex_groupby(self): from pyspark.sql.functions import lit, sum df = self.data sum_udf = self.pandas_agg_sum_udf plus_one = self.python_plus_one plus_two = self.pandas_scalar_plus_two # groupby one expression result1 = df.groupby(df.v % 2).agg(sum_udf(df.v)) expected1 = df.groupby(df.v % 2).agg(sum(df.v)) # empty groupby result2 = df.groupby().agg(sum_udf(df.v)) expected2 = df.groupby().agg(sum(df.v)) # groupby one column and one sql expression result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)) expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)) # groupby one python UDF result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)) expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)) # groupby one scalar pandas UDF result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)) expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)) # groupby one expression and one python UDF result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v)) expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v)) # groupby one expression and one scalar pandas UDF result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)') expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)') self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) self.assertPandasEqual(expected2.toPandas(), result2.toPandas()) self.assertPandasEqual(expected3.toPandas(), result3.toPandas()) self.assertPandasEqual(expected4.toPandas(), result4.toPandas()) self.assertPandasEqual(expected5.toPandas(), result5.toPandas()) self.assertPandasEqual(expected6.toPandas(), result6.toPandas()) self.assertPandasEqual(expected7.toPandas(), result7.toPandas()) def test_complex_expressions(self): from pyspark.sql.functions import col, sum df = self.data plus_one = self.python_plus_one plus_two = self.pandas_scalar_plus_two sum_udf = self.pandas_agg_sum_udf # Test complex expressions with sql expression, python UDF and # group aggregate pandas UDF result1 = (df.withColumn('v1', plus_one(df.v)) .withColumn('v2', df.v + 2) .groupby(df.id, df.v % 2) .agg(sum_udf(col('v')), sum_udf(col('v1') + 3), sum_udf(col('v2')) + 5, plus_one(sum_udf(col('v1'))), sum_udf(plus_one(col('v2')))) .sort('id') .toPandas()) expected1 = (df.withColumn('v1', df.v + 1) .withColumn('v2', df.v + 2) .groupby(df.id, df.v % 2) .agg(sum(col('v')), sum(col('v1') + 3), sum(col('v2')) + 5, plus_one(sum(col('v1'))), sum(plus_one(col('v2')))) .sort('id') .toPandas()) # Test complex expressions with sql expression, scala pandas UDF and # group aggregate pandas UDF result2 = (df.withColumn('v1', plus_one(df.v)) .withColumn('v2', df.v + 2) .groupby(df.id, df.v % 2) .agg(sum_udf(col('v')), sum_udf(col('v1') + 3), sum_udf(col('v2')) + 5, plus_two(sum_udf(col('v1'))), sum_udf(plus_two(col('v2')))) .sort('id') .toPandas()) expected2 = (df.withColumn('v1', df.v + 1) .withColumn('v2', df.v + 2) .groupby(df.id, df.v % 2) .agg(sum(col('v')), sum(col('v1') + 3), sum(col('v2')) + 5, plus_two(sum(col('v1'))), sum(plus_two(col('v2')))) .sort('id') .toPandas()) 
# Test sequential groupby aggregate result3 = (df.groupby('id') .agg(sum_udf(df.v).alias('v')) .groupby('id') .agg(sum_udf(col('v'))) .sort('id') .toPandas()) expected3 = (df.groupby('id') .agg(sum(df.v).alias('v')) .groupby('id') .agg(sum(col('v'))) .sort('id') .toPandas()) self.assertPandasEqual(expected1, result1) self.assertPandasEqual(expected2, result2) self.assertPandasEqual(expected3, result3) def test_retain_group_columns(self): from pyspark.sql.functions import sum, lit, col with self.sql_conf({"spark.sql.retainGroupColumns": False}): df = self.data sum_udf = self.pandas_agg_sum_udf result1 = df.groupby(df.id).agg(sum_udf(df.v)) expected1 = df.groupby(df.id).agg(sum(df.v)) self.assertPandasEqual(expected1.toPandas(), result1.toPandas()) def test_invalid_args(self): from pyspark.sql.functions import mean df = self.data plus_one = self.python_plus_one mean_udf = self.pandas_agg_mean_udf with QuietTest(self.sc): with self.assertRaisesRegexp( AnalysisException, 'nor.*aggregate function'): df.groupby(df.id).agg(plus_one(df.v)).collect() with QuietTest(self.sc): with self.assertRaisesRegexp( AnalysisException, 'aggregate function.*argument.*aggregate function'): df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect() with QuietTest(self.sc): with self.assertRaisesRegexp( AnalysisException, 'mixture.*aggregate function.*group aggregate pandas UDF'): df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect() if __name__ == "__main__": from pyspark.sql.tests import * if xmlrunner: unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2) else: unittest.main(verbosity=2)
apache-2.0
-6,319,921,322,607,896,000
42.323166
100
0.583676
false
pscholl/wsnlab
teams_1516/risiköwast/platypus/software/viz_PPS.py
1
1454
#!/usr/bin/env python2.7

import sys, time, os
import numpy as np

path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, "pps_plot")
sys.path.append(path)
print sys.path

import pps_plot as pplt
import matplotlib.dates as mld
import pygtk, gtk

## data descriptor for the platypus default data:
desc_pps = {
    'names': ('t', 'ax', 'ay', 'az', 'gx', 'gy', 'gz', 'l1', 'l2', 'temp', 'press', 'hum'),
    'formats': ('f8', 'h', 'h', 'h', 'h', 'h', 'h', 'H', 'H', 'i', 'I', 'I') }

try:
    filename = sys.argv[1]
    ext = filename[-3:]
    ext = ext.lower()
except:
    print 'usage: viz_PPS.py <data.npy|data.npz>'
    quit()

if ext=='npy':
    # simple loading of numpy file:
    dta = np.load(filename)
elif ext=='npz':
    out = np.load(filename)
    dta = out['data']
else:
    print 'file has wrong extension'
    quit()

dta = dta.view(desc_pps, np.recarray)
#print dta

## actual plotting here:
fig = pplt.PPS_raw_plot(10,8,80)
fig.plot(1, 7, dta.t, (np.array((dta.ax,dta.ay,dta.az)).T) / 8192.0 * 9.807,'3D acceleration')
fig.plot(2, 7, dta.t, (np.array((dta.gx,dta.gy,dta.gz)).T) / 131.0,'3D gyroscope')
fig.plot(3, 7, dta.t, np.array((dta.l1)).T, 'ambient light')
fig.plot(4, 7, dta.t, np.array((dta.l2)).T, 'infrared light')
fig.plot(5, 7, dta.t, (np.array((dta.temp)).T) / 100.0, 'temperature')
fig.plot(6, 7, dta.t, (np.array((dta.press)).T) / 25600.0, 'pressure')
fig.plot(7, 7, dta.t, (np.array((dta.hum)).T) / 1024.0, 'humidity')
fig.show()
bsd-3-clause
3,116,911,343,090,859,000
27.509804
103
0.614856
false
nickicejones/ENGAGE
ENGAGE2.0/ENGAGE2.0/Main model code/rasterstonumpys.py
1
33581
#---------------------------------------------------------------------#
##### START OF CODE #####
# Import statements
import arcpy
import csv
import datetime
import numpy as np
from itertools import izip

# Simple function to convert rasters to numpys
def convert_raster_to_numpy(list_of_rasters):
    list_of_numpy_arrays = []
    for raster in list_of_rasters:
        if raster and raster != '#':
            arcpy.AddMessage("Converting " + str(raster) + " raster to numpy array")
            numpy_raster = arcpy.RasterToNumPyArray(raster, '#', '#', '#', -9999)
            list_of_numpy_arrays.append(numpy_raster)
        else:
            list_of_numpy_arrays.append(raster)

    arcpy.AddMessage("-------------------------")
    arcpy.AddMessage("Successfully converted rasters to numpy arrays")
    arcpy.AddMessage("-------------------------")
    return list_of_numpy_arrays

def convert_numpy_to_raster_dict(list_of_numpys, bottom_left_corner, cell_size, save_date):
    for name, numpy in list_of_numpys.iteritems():
        arcpy.AddMessage("Converting " + str(name) + " numpy array to raster")
        raster = arcpy.NumPyArrayToRaster(numpy, bottom_left_corner, cell_size, cell_size, -9999)
        raster.save(name + "_" + str(save_date))
        del name, numpy, raster

    arcpy.AddMessage("-------------------------")
    arcpy.AddMessage("Successfully converted numpy arrays to rasters")
    arcpy.AddMessage("-------------------------")

def convert_numpy_to_raster_list(list_of_numpys, bottom_left_corner, cell_size, save_date):
    number = 1
    for name in list_of_numpys:
        arcpy.AddMessage("Converting numpy array to raster")
        raster = arcpy.NumPyArrayToRaster(name, bottom_left_corner, cell_size, cell_size, -9999)
        raster.save("testing" + str(number) + "_" + str(save_date))
        del name, raster
        number += 1

    arcpy.AddMessage("-------------------------")
    arcpy.AddMessage("Successfully converted numpy arrays to rasters")
    arcpy.AddMessage("-------------------------")

def convert_numpy_to_raster_single(numpy, output_type, bottom_left_corner, cell_size, save_date):
    arcpy.AddMessage("Converting " + str(output_type) + " numpy array to raster")
    raster = arcpy.NumPyArrayToRaster(numpy, bottom_left_corner, cell_size, cell_size, -9999)
    raster.save(output_type + "_" + str(save_date))
    del raster
    arcpy.AddMessage("-------------------------")
    arcpy.AddMessage("Successfully converted numpy arrays to rasters")
    arcpy.AddMessage("-------------------------")

def save_discharge_or_sediment_csv(output_excel_discharge, output_excel_sediment):
    # Set up the discharge location which outputs the value at the bottom of the catchment every day.
    if output_excel_discharge and output_excel_discharge != "#":
        output_excel_discharge = output_excel_discharge + "/discharge.csv"
        daily_discharge = open(output_excel_discharge, 'wb')
        discharge_spamwriter = csv.writer(daily_discharge, delimiter=',')
    else:
        discharge_spamwriter = "#"

    # Set up the save location for sediment leaving the bottom of the system
    if output_excel_sediment and output_excel_sediment != "#":
        output_excel_sediment = output_excel_sediment + "/discharge.csv"
        daily_sediment = open(output_excel_sediment, 'wb')
        sediment_spamwriter = csv.writer(daily_sediment, delimiter=',')
    else:
        sediment_spamwriter = "#"

    return discharge_spamwriter, sediment_spamwriter

def output_discharge_csv(current_date, discharge_spamwriter, Q_max):
    if discharge_spamwriter and discharge_spamwriter != "#":
        discharge_spamwriter.writerow([current_date, Q_max])
        arcpy.AddMessage("Daily Discharge Written to CSV")

def output_sediment_csv(current_date, sediment_spamwriter, Sed_max):
    if sediment_spamwriter and sediment_spamwriter != "#":
        sediment_spamwriter.writerow([current_date, Sed_max])
        arcpy.AddMessage("Daily Discharge Written to CSV")

# Function to check which outputs are required from the model
def raster_outputs(week_day, month_day, year_day, current_date, first_loop, output_file_dict, output_format, output_averages_temp, bottom_left_corner, cell_size, Q_surf_np, Q_dis, depth_recking, precipitation, sediment_depth, net_sediment):

    # Create a format which says what todays date is
    daily_save_date = str(current_date.strftime('%d_%m_%Y'))
    monthly_save_date = str(current_date.strftime('%m_%Y'))
    year_save_date = str(current_date.strftime('%Y'))

    tomorrow = current_date + datetime.timedelta(days=1)
    tomorrow_day = int(tomorrow.strftime('%d'))
    tomorrow_month = int(tomorrow.strftime('%m'))

    # Check if empty arrays need to be created to store averages or totals this is only carried out on the first loop
    if first_loop == True:
        arcpy.AddMessage("First day of operation checking average output rasters")
        for output_type, output_frequency in output_file_dict.iteritems():
            if str(output_frequency) != 'No output' and str(output_frequency) != 'Daily':
                if output_type == "Surface_runoff":
                    Q_surf_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[0], Q_surf_avg)
                    arcpy.AddMessage(output_type + " raster created")
                if output_type == "Discharge":
                    Q_dis_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[1], Q_dis_avg)
                    arcpy.AddMessage(output_type + " raster created")
                if output_type == "Water_depth":
                    depth_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[2], depth_avg)
                    arcpy.AddMessage(output_type + " raster created")
                if output_type == "Spatial_precipitation":
                    precipitation_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[3], precipitation_avg)
                    arcpy.AddMessage(output_type + " raster created")
                if output_type == "Sediment_depth":
                    sed_depth_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[4], sed_depth_avg)
                    arcpy.AddMessage(output_type + " raster created")
                if output_type == "Net_sediment":
                    sed_erosion_deposition_avg = np.zeros_like(Q_surf_np)
                    np.save(output_averages_temp[5], sed_erosion_deposition_avg)
                    arcpy.AddMessage(output_type + " raster created")

    # Load the average arrays if required.
if first_loop == False: for output_type, output_frequency in output_file_dict.iteritems(): if str(output_frequency) != 'No output' and str(output_frequency) != 'Daily': if output_type == "Surface_runoff": Q_surf_avg = np.load(output_averages_temp[0]) if output_type == "Discharge": Q_dis_avg = np.load(output_averages_temp[1]) if output_type == "Water_depth": depth_avg = np.load(output_averages_temp[2]) if output_type == "Spatial_precipitation": precipitation_avg = np.load(output_averages_temp[3]) if output_type == "Sediment_depth": sed_depth_avg = np.load(output_averages_temp[4]) if output_type == "Net_sediment": sed_erosion_deposition_avg = np.load(output_averages_temp[5]) # Add one onto the weekly/monthly/yearly day counter week_day = week_day + 1 month_day = month_day + 1 year_day = year_day + 1 for output_type, output_frequency in output_file_dict.iteritems(): ### What to do if the output is daily ### if output_frequency == 'Daily': if output_type == "Surface_runoff": convert_numpy_to_raster_single(Q_surf_np, output_type, bottom_left_corner, cell_size, daily_save_date) if output_type == "Discharge": convert_numpy_to_raster_single(Q_dis, output_type, bottom_left_corner, cell_size, daily_save_date) if output_type == "Water_depth": convert_numpy_to_raster_single(depth_recking, output_type, bottom_left_corner, cell_size, daily_save_date) if output_type == "Spatial_precipitation": convert_numpy_to_raster_single(precipitation, output_type, bottom_left_corner, cell_size, daily_save_date) if output_type == "Sediment_depth": convert_numpy_to_raster_single(sediment_depth, output_type, bottom_left_corner, cell_size, daily_save_date) # Need to change output if output_type == "Net_sediment": convert_numpy_to_raster_single(net_sediment, output_type, bottom_left_corner, cell_size, daily_save_date) # Need to change output ### What happens if the output is weekly ### if output_frequency == 'Weekly': if output_type == "Surface_runoff": arcpy.AddMessage("Surface_runoff added to weekly average") Q_surf_avg = Q_surf_avg + Q_surf_np if output_type == "Discharge": arcpy.AddMessage("Discharge added to weekly average") Q_dis_avg = Q_dis_avg + Q_dis if output_type == "Water_depth": arcpy.AddMessage("Water depth added to weekly average") depth_avg = depth_avg + depth_recking if output_type == "Spatial_precipitation": arcpy.AddMessage("Spatial precipitation added to weekly average") precipitation_avg = precipitation_avg + precipitation if output_type == "Sediment_depth": arcpy.AddMessage("Sediment depth added to weekly average") sed_depth_avg = sed_depth_avg + sediment_depth if output_type == "Net_sediment": arcpy.AddMessage("Net sediment added to weekly average") sed_erosion_deposition_avg = sed_erosion_deposition_avg + net_sediment if week_day == 7: if output_format == 'Daily average': arcpy.AddMessage("Weekly average selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving weekly runoff average") Q_surf_avg = Q_surf_avg / 7 Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, daily_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving weekly discharge average") Q_dis_avg = Q_dis_avg / 7 Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, daily_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving weekly depth average") depth_avg = depth_avg / 7 
depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, daily_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving weekly spatial precipitation average") precipitation_avg = precipitation_avg / 7 precipitation_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, daily_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving weekly sediment depth average") sed_depth_avg = sed_depth_avg / 7 sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, daily_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving weekly sediment eroision/deposition average") sed_erosion_deposition_avg = sed_erosion_deposition_avg / 7 sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, daily_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) elif output_format == 'Total': arcpy.AddMessage("Weekly total selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving weekly Surface_runoff total") Q_surf_avg = Q_surf_avg Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, daily_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving weekly discharge total") Q_dis_avg = Q_dis_avg Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, daily_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving weekly depth total") depth_avg = depth_avg depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, daily_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving weekly spatial precipitation total") precipitation_avg = precipitation_avg precipitation_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, daily_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving weekly sediment depth total") sed_depth_avg = sed_depth_avg sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, daily_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving weekly sediment eroision/deposition total") sed_erosion_deposition_avg = sed_erosion_deposition_avg sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, daily_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) # What happens if the output is monthly if output_frequency == 'Monthly': arcpy.AddMessage("Tomorrow will be day " + str(tomorrow_day)) if output_type == "Surface_runoff": arcpy.AddMessage("Surface_runoff added to monthly average") Q_surf_avg = Q_surf_avg + Q_surf_np if output_type 
== "Discharge": arcpy.AddMessage("Discharge added to monthly average") Q_dis_avg = Q_dis_avg + Q_dis if output_type == "Water_depth": arcpy.AddMessage("Water depth added to monthly average") depth_avg = depth_avg + depth_recking if output_type == "Spatial_precipitation": arcpy.AddMessage("Spatial precipitation added to monthly average") precipitation_avg = precipitation_avg + precipitation if output_type == "Sediment_depth": arcpy.AddMessage("Sediment depth added to monthly average") sed_depth_avg = sed_depth_avg + sediment_depth if output_type == "Net_sediment": arcpy.AddMessage("Net sediment added to monthly average") sed_erosion_deposition_avg = sed_erosion_deposition_avg + net_sediment if tomorrow_day == 1: if output_format == 'Daily average': arcpy.AddMessage("Monthly average selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving monthly Surface_runoff average") Q_surf_avg = Q_surf_avg / month_day Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving monthly discharge average") Q_dis_avg = Q_dis_avg / month_day Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving monthly depth average") depth_avg = depth_avg / month_day depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving spatial precipitation average") precipitation_avg = precipitation_avg / month_day precipitation_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving monthly sediment depth average") sed_depth_avg = sed_depth_avg / month_day sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving monthly sediment eroision/deposition average") sed_erosion_deposition_avg = sed_erosion_deposition_avg / month_day sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) elif output_format == 'Total': arcpy.AddMessage("Monthly total selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving monthly Surface_runoff total") Q_surf_avg = Q_surf_avg Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving monthly discharge total") Q_dis_avg = Q_dis_avg Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving monthly 
depth total") depth_avg = depth_avg depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving spatial precipitation total") precipitation_avg = precipitation_avg precipitation_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving monthly sediment depth total") sed_depth_avg = sed_depth_avg sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving monthly sediment eroision/deposition total") sed_erosion_deposition_avg = sed_erosion_deposition_avg sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, monthly_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) # What happens if the output is yearly if output_frequency == 'Yearly': arcpy.AddMessage("Tomorrow will be day " + str(tomorrow_day) + "and month will be " + str(tomorrow_month)) if output_type == "Surface_runoff": arcpy.AddMessage("Surface_runoff added to yearly average") Q_surf_avg = Q_surf_avg + Q_surf_np if output_type == "Discharge": arcpy.AddMessage("Discharge added to yearly average") Q_dis_avg = Q_dis_avg + Q_dis if output_type == "Water_depth": arcpy.AddMessage("Water depth added to yearly average") depth_avg = depth_avg + depth_recking if output_type == "Spatial_precipitation": arcpy.AddMessage("Spatial precipitation added to yearly average") precipitation_avg = precipitation_avg + precipitation if output_type == "Sediment_depth": arcpy.AddMessage("Sediment depth added to yearly average") sed_depth_avg = sed_depth_avg + sediment_depth if output_type == "Net_sediment": arcpy.AddMessage("Net sediment added to yearly average") sed_erosion_deposition_avg = sed_erosion_deposition_avg + net_sediment if tomorrow_day == 1 and tomorrow_month == 1: if output_format == 'Daily average': arcpy.AddMessage("Yearly average selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving Yearly Surface_runoff average") Q_surf_avg = Q_surf_avg / year_day Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, year_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving Yearly discharge average") Q_dis_avg = Q_dis_avg / year_day Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, year_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving Yearly depth average") depth_avg = depth_avg / year_day depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, year_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving spatial precipitation Yearly average") precipitation_avg = precipitation_avg / year_day precipitation_avg[Q_surf_np == -9999] = -9999 
convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, year_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving Yearly sediment depth average") sed_depth_avg = sed_depth_avg / year_day sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, year_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving yearly net sediment average") sed_erosion_deposition_avg = sed_erosion_deposition_avg / year_day sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, year_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) elif output_format == 'Total': arcpy.AddMessage("Yearly total selected") if output_type == "Surface_runoff": arcpy.AddMessage("Saving yearly Surface_runoff total") Q_surf_avg = Q_surf_avg Q_surf_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_surf_avg, output_type, bottom_left_corner, cell_size, year_save_date) Q_surf_avg = np.zeros_like(Q_surf_avg) if output_type == "Discharge": arcpy.AddMessage("Saving yearly discharge total") Q_dis_avg = Q_dis_avg Q_dis_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(Q_dis_avg, output_type, bottom_left_corner, cell_size, year_save_date) Q_dis_avg = np.zeros_like(Q_dis_avg) if output_type == "Water_depth": arcpy.AddMessage("Saving yearly depth total") depth_avg = depth_avg depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(depth_avg, output_type, bottom_left_corner, cell_size, year_save_date) depth_avg = np.zeros_like(depth_avg) if output_type == "Spatial_precipitation": arcpy.AddMessage("Saving yearly precipitation total") precipitation_avg = precipitation_avg precipitation_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(precipitation_avg, output_type, bottom_left_corner, cell_size, year_save_date) precipitation_avg = np.zeros_like(precipitation_avg) if output_type == "Sediment_depth": arcpy.AddMessage("Saving yearly sediment depth total") sed_depth_avg = sed_depth_avg sed_depth_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_depth_avg, output_type, bottom_left_corner, cell_size, year_save_date) sed_depth_avg = np.zeros_like(sed_depth_avg) if output_type == "Net_sediment": arcpy.AddMessage("Saving yearly sediment eroision/deposition total") sed_erosion_deposition_avg = sed_erosion_deposition_avg sed_erosion_deposition_avg[Q_surf_np == -9999] = -9999 convert_numpy_to_raster_single(sed_erosion_deposition_avg, output_type, bottom_left_corner, cell_size, year_save_date) sed_erosion_deposition_avg = np.zeros_like(sed_erosion_deposition_avg) # Counter resets if week_day == 7: week_day = 0 arcpy.AddMessage("Week complete resetting output counter") if tomorrow_day == 1: month_day = 0 arcpy.AddMessage("Month complete resetting output counter") if tomorrow_day == 1 and tomorrow_month == 1: year_day = 0 arcpy.AddMessage("Year complete resetting output counter") # Save the average arrays to disk if required for output_type, output_frequency in output_file_dict.iteritems(): if str(output_frequency) != 'No output' and str(output_frequency) != 'Daily': # Save the average arrays to disk. 
if output_type == "Surface_runoff": np.save(output_averages_temp[0], Q_surf_avg) del Q_surf_avg if output_type == "Discharge": np.save(output_averages_temp[1], Q_dis_avg) del Q_dis_avg if output_type == "Water_depth": np.save(output_averages_temp[2], depth_avg) del depth_avg if output_type == "Spatial_precipitation": np.save(output_averages_temp[3], precipitation_avg) del precipitation_avg if output_type == "Sediment_depth": np.save(output_averages_temp[4], sed_depth_avg) del sed_depth_avg if output_type == "Net_sediment": np.save(output_averages_temp[5], sed_erosion_deposition_avg) del sed_erosion_deposition_avg return week_day, month_day, year_day def numpystocsv(list_of_numpys, list_of_numpy_names): for numpy_array, numpy_name in izip(list_of_numpys, list_of_numpy_names): numpy_array_type = type(numpy_array).__module__ numpy_array_bool = type(numpy_array).__module__ == np.__name__ if numpy_array_bool == True: np.savetxt(r"D:/EngageTesting/CSV_outputs/" + numpy_name + ".csv", numpy_array, delimiter=",") print numpy_name + " is a numpy array." else: print numpy_name + " is not a numpy array. It is " + str(numpy_array_type)
gpl-2.0
-3,313,822,223,856,635,400
56.598628
147
0.542571
false
h2oai/h2o-dev
h2o-py/tests/testdir_misc/pyunit_frame_from_pandas.py
6
1195
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import h2o
import pandas as pd
from tests import pyunit_utils


def test_pandas_to_h2oframe():
    def compare_frames(h2ofr, pdfr, colnames=None):
        if not colnames:
            colnames = list(pdfr.columns)
        assert h2ofr.shape == pdfr.shape
        assert h2ofr.columns == colnames, "Columns differ: %r vs %r" % (h2ofr.columns, colnames)
        for i in range(len(h2ofr.columns)):
            s1 = pdfr[pdfr.columns[i]].tolist()
            s2 = h2ofr[colnames[i]].as_data_frame()[colnames[i]].tolist()
            assert s1 == s2, ("The columns are different: h2oframe[%d] = %r, pdframe[%d] = %r"
                              % (i, s1, i, s2))

    pddf = pd.DataFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
    h2odf1 = h2o.H2OFrame.from_python(pddf)
    h2odf2 = h2o.H2OFrame.from_python(pddf, column_names=["A", "B", "C"])
    h2odf3 = h2o.H2OFrame(pddf)

    compare_frames(h2odf1, pddf)
    compare_frames(h2odf2, pddf, ["A", "B", "C"])
    compare_frames(h2odf3, pddf)


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_pandas_to_h2oframe)
else:
    test_pandas_to_h2oframe()
apache-2.0
-2,524,008,531,408,392,000
33.142857
96
0.5841
false
secimTools/SECIMTools
src/scripts/mzrt_match.py
1
14958
#!/usr/bin/env python ################################################################################ # Date: 2017/03/10 # # Module: mzrt_match.py # # VERSION: 1.1 # # AUTHOR: Miguel Ibarra (miguelib@ufl.edu) # # DESCRIPTION: This program compares the features among 2 annotation files based # on their retention time and mz.It will output the results of this # comparison at level of combinations and features. ################################################################################ # Import built-in libraries import argparse import os import logging import itertools # Import add-on libraries import pandas as pd import numpy as np import matplotlib matplotlib.use('Agg') from matplotlib.backends.backend_pdf import PdfPages # Import local data libraries from secimtools.dataManager import interface from secimtools.dataManager import logger as sl # Import local plotting libraries from secimtools.visualManager import module_venn as mVenn def getOptions(): """Function to pull arguments""" parser = argparse.ArgumentParser(description="""Matches rows (features) in 2 files by their m/z and RT values""") # Required Input required = parser.add_argument_group(title='Required Input', description='Required input to the program') required.add_argument('-a1',"--anno1",dest="anno1", action="store", required=True,help="Out path for first file") required.add_argument('-a2',"--anno2",dest="anno2", action="store", required=True,help="Out path for second file") required.add_argument("-ID1","--uniqID1",dest="uniqID1",action="store", required=True, default = "rowID",help="""Name of the column in file1 that contains the uniqID""") required.add_argument("-mz1","--mzID1",dest="mzID1",action="store", required=True, default = "RT",help="""Name of the column in file1 that contains MZ""") required.add_argument("-rt1","--rtID1",dest="rtID1",action="store", required=True, default = "MZ",help="""Name of the column in file1 that contains RT""") required.add_argument("-ID2","--uniqID2",dest="uniqID2",action="store", required=True, default = "rowID",help="""Name of the column in file2 that contains the uniqID""") required.add_argument("-mz2","--mzID2",dest="mzID2",action="store", required=True,default = "MZ",help="""Name of the column in file2 that contains MZ""") required.add_argument("-rt2","--rtID2",dest="rtID2",action="store", required=True,default = "RT",help="""Name of the column in file2 that contains RT""") # Tool Output output = parser.add_argument_group(title='Output files', description='Output paths for the program') output.add_argument('-a', "--all", dest="all", action='store', required=True,help="Out path for All peak combinations File") output.add_argument('-m', "--matched", dest="matched", action='store', required=True,help="Out path for Matched peaks combinations File") output.add_argument('-u1', "--unmatched1", dest="unmatched1", action='store', required=True,help="Out path for Unmatched peaks in file 1") output.add_argument('-u2',"--unmatched2", dest="unmatched2", action='store', required=True,help="Out path for Unmatched peaks in file 2") output.add_argument('-s',"--summary", dest="summary", action='store', required=True,help="Out path for Summary File") output.add_argument('-fig',"--figure", dest="figure", action='store', required=True,help="""Out path for Matched vs Unmatched Combinations Venn Diagram File""") # Tool Input tool = parser.add_argument_group(title="Tool Input", description="Tool Especific Input") tool.add_argument('-mz',"--mzcut",dest="mzcut",action='store', required=False, 
default="0.005",help="""Window value for MZ matching [default 0.005]""") tool.add_argument('-rt',"--rtcut",dest="rtcut",action='store', required=False, default="0.15",help="""Window value for RT matching [default 0.15]""") tool.add_argument('-n1',"--name1",dest="name1",action='store', required=False, default="F1",help="""Short name for File 1 [default F1]""") tool.add_argument('-n2',"--name2",dest="name2",action='store', required=False, default="F2",help="""Short name for File 2 [default F2]""") args = parser.parse_args() # Standardize paths args.all = os.path.abspath(args.all) args.anno1 = os.path.abspath(args.anno1) args.anno2 = os.path.abspath(args.anno2) args.figure = os.path.abspath(args.figure) args.summary = os.path.abspath(args.summary) args.matched = os.path.abspath(args.matched) args.unmatched1 = os.path.abspath(args.unmatched1) args.unmatched2 = os.path.abspath(args.unmatched2) return(args) def matchFiles (anno1,anno2,MZCut,RTCut,reverse=False): """ Match 2 Files and returns an array with the results. :Arguments: :type anno1: pd.DataFrame. :param anno1: Dataframe with the annotation file 1. :type anno2: pd.DataFrame. :param anno2: Dataframe with the annotation file 2. :type MZCut: int :param MZCut: window size for mz. :type RtCut: int :param RtCut: window size for rt. :type reverse: boolean. :param reverse: flag to determinte wich way the match is going to be done. :Returns: :return: Returns a figure object. :rtype: matplotlib.pyplot.Figure.figure """ if reverse: logger.info(u"Matching annotation file 1 to annotation file 2") else: logger.info(u"Matching annotation file 2 to annotation file 1") #Creating dataframe for matched matched_df = pd.DataFrame(columns=["rowID1","MZ1","RT1","rowID2","MZ2","RT2"]) unmatched_df = pd.DataFrame(columns=["rowID1","MZ1","RT1","rowID2","MZ2","RT2"]) #Iterating over annotation 1 and comparing with annotation 2 for rowID1,MZRT1 in anno1.data.iterrows(): #Setting flag_unmatch flag_unmatch = True #Calculating MZ and RT cuts mzMin = MZRT1[anno1.mz] - MZCut mzMax = MZRT1[anno1.mz] + MZCut rtMin = MZRT1[anno1.rt] - RTCut rtMax = MZRT1[anno1.rt] + RTCut #Iterating over annotation 2 and comparing with anotation 1 for rowID2,MZRT2 in anno2.data.iterrows(): #Match found between anno1 and anno2 if ((mzMin<MZRT2[anno2.mz] and MZRT2[anno2.mz]<mzMax) and (rtMin<MZRT2[anno2.rt] and MZRT2[anno2.rt]<rtMax)): #If reverse reverse the output if reverse: matched_s = pd.DataFrame(data=[[rowID2,MZRT2[anno2.mz], MZRT2[anno2.rt],rowID1,MZRT1[anno1.mz],MZRT1[anno1.rt]]], columns=["rowID1","MZ1","RT1","rowID2","MZ2","RT2"]) #Creting dataframe to apend latter else: matched_s = pd.DataFrame(data=[[rowID1,MZRT1[anno1.mz], MZRT1[anno1.rt],rowID2,MZRT2[anno2.mz],MZRT2[anno2.rt]]], columns=["rowID1","MZ1","RT1","rowID2","MZ2","RT2"]) matched_df = matched_df.append(matched_s) flag_unmatch = False #Exclusively found on anno1 if flag_unmatch: #if reverse reverse output if reverse: unmatched_s = pd.DataFrame(data=[["","","",rowID1,MZRT1[anno1.mz], MZRT1[anno1.rt]]], columns=["rowID1","MZ1","RT1", "rowID2","MZ2","RT2"]) #Create dataframe to append to unmateched dataframe else: unmatched_s = pd.DataFrame(data=[[rowID1,MZRT1[anno1.mz], MZRT1[anno1.rt],"","",""]], columns=["rowID1","MZ1","RT1", "rowID2","MZ2","RT2"]) unmatched_df = unmatched_df.append(unmatched_s) return matched_df,unmatched_df def getSummary (match,umatch1,umatch2): """ Plot the standardized Euclidean distance plot for samples to the Mean. 
:Arguments: :type match: pd.DataFrame :param match: Dataframe with just the matched combinations :type umatch1: pd.DataFrame :param umatch1: Dataframe with the unmatched combinations for file 1. :type umatch2: pd.DataFrame :param umatch2: Dataframe with the unmatched combinations for file 2. :Returns: :return: dictionary. :rtype: dictionary with all the summary information. """ logger.info(u"Summarizing results") #Calculate combinations nUmatchCombinations1 = len(umatch1) #This is the number of unmatched features nUmatchCombinations2 = len(umatch2) #This is the number of unmatched features nMatchCombinations = len(match) nAllCombinations = len(match) + len(umatch1) + len(umatch2) #Calculate number of features nMatchFeatures1 = len(list(set(match["rowID1"].values))) nMatchFeatures2 = len(list(set(match["rowID2"].values))) nMatchFeatures = nMatchFeatures1 + nMatchFeatures2 nAllFeatures = nUmatchCombinations1+nUmatchCombinations2+nMatchFeatures #Calculate number of multiple features nMultupleFeatures1 = len([len(list(count)) for name,count in itertools.groupby(sorted(match["rowID1"])) if (len(list(count))>1)]) nMultipleFeatures2 = len([len(list(count)) for name,count in itertools.groupby(sorted(match["rowID2"])) if (len(list(count))>1)]) nMultipleFeatures = nMultupleFeatures1+nMultipleFeatures2 #Calculate number of single features nSingleFeatures1 = nMatchFeatures1-nMultupleFeatures1 nSingleFeatures2 = nMatchFeatures2-nMultipleFeatures2 nSingleFeatures = nSingleFeatures1+nSingleFeatures2 #Creating series summary_S = pd.Series([nUmatchCombinations1,nUmatchCombinations2, nMatchCombinations,nAllCombinations, nMatchFeatures1,nMatchFeatures2, nMatchFeatures,nAllFeatures, nMultupleFeatures1,nMultipleFeatures2, nMultipleFeatures,nSingleFeatures1, nSingleFeatures2,nSingleFeatures], index=["UmatchCombinations1","UmatchCombinations2", "MatchCombinations","AllCombinations", "MatchFeatures1","MatchFeatures2", "MatchFeatures","AllFeatures", "MultupleFeatures1","MultipleFeatures2", "MultipleFeatures","SingleFeatures1", "SingleFeatures2","SingleFeatures"]) return summary_S def plotFigures(args,pdf,data): """ Plot the Venn diagrams of the combinations and features :Arguments: :type args: argparse object :param args: argparse object with all the input parameters, from here we are just going to take the short names and the out paths. :type data: dictionary :param data: dictionary with all the data information. """ #Venn match unmatch combinations mvsumCombFig=mVenn.plotVenn2([data["UmatchCombinations1"], data["UmatchCombinations2"],data["MatchCombinations"]], title="MZ-RT Matched vs Unmatched (Combinations)",name1=args.name1, name2=args.name2,circles=True) mvsumCombFig.addToPdf(dpi=600, pdfPages=pdf) #Venn match vs unmatch features mvsumFeatFig=mVenn.plotVenn2([data["UmatchCombinations1"], data["UmatchCombinations2"],data["MatchFeatures"]], title="MZ-RT Matched vs Unmatched (Features)",name1=args.name1, name2=args.name2,circles=True) mvsumFeatFig.addToPdf(dpi=600, pdfPages=pdf) #Venn single vs multiple svsuFeatFig=mVenn.plotVenn2([data["SingleFeatures1"] ,data["SingleFeatures2"],data["MultipleFeatures"]], title="MZ-RT Single vs Multiple",name1=args.name1, name2=args.name2,circles=True) svsuFeatFig.addToPdf(dpi=600, pdfPages=pdf) def writeOutput(paths,files): """ Writes output :Arguments: :type paths: list :param paths: list of out paths :type files: list :param files: list of pandas data frames """ #For each dataframe saves it in its specific file. #File names must match! 
for i in range(len(files)): files[i].to_csv(paths[i],index=False,sep="\t") def main(args): """Main function""" #Read annotation file 1 anno1 = interface.annoFormat(data=args.anno1, uniqID=args.uniqID1, mz=args.mzID1, rt=args.rtID1) anno2 = interface.annoFormat(data=args.anno2, uniqID=args.uniqID2, mz=args.mzID2, rt=args.rtID2) #Matching files anno1 vs anno2 match12_df,umatch12_df = matchFiles(anno1=anno1,anno2=anno2, MZCut=float(args.mzcut), RTCut=float(args.rtcut)) match21_df,umatch21_df = matchFiles(anno1=anno2,anno2=anno1, MZCut=float(args.mzcut), RTCut=float(args.rtcut), reverse = True) #concatenate match result match_df = pd.concat([match12_df,match21_df],axis=0) #Remove duplicates from match match_df.drop_duplicates(inplace=True) #Create all results file all_df = pd.concat([match_df,umatch12_df,umatch21_df],axis=0) #Get summary data summary_S = getSummary(match_df,umatch12_df,umatch21_df) #Plot venn Diagrams # logger.info(u"Plotting Figures") # with PdfPages(args.figure) as pdfOut: # plotFigures(args=args,pdf=pdfOut,data=summary_S) #Output data writeOutput(paths=[args.unmatched1,args.unmatched2,args.matched,args.all], files=[umatch12_df,umatch21_df,match_df,all_df]) summary_S.to_csv(args.summary,sep="\t") logger.info("Analysis complete") if __name__=='__main__': args = getOptions() logger = logging.getLogger() sl.setLogger(logger) logger.info("Importing data with the following parameters:"\ "\tAnnotation 1: {0}:"\ "\tAnnotation 2: {1}".format(args.anno1,args.anno2)) main(args)
mit
2,853,600,819,641,599,000
41.737143
90
0.603156
false
adrn/streams
streams/rewinder/sampler.py
2
2596
# coding: utf-8

""" Special emcee sampler for Rewinder. """

from __future__ import division, print_function

__author__ = "adrn <adrn@astro.columbia.edu>"

# Standard library
import os
import sys
import time

# Third-party
from emcee import EnsembleSampler
import numpy as np
from astropy import log as logger

__all__ = ['RewinderSampler']

# TODO: banish h5py
import h5py  # required by RewinderSampler.write() below


class RewinderSampler(EnsembleSampler):

    def __init__(self, model, nwalkers=None, pool=None, a=2.):
        """ """
        if nwalkers is None:
            nwalkers = model.nparameters*2 + 2
        self.nwalkers = nwalkers

        super(RewinderSampler, self).__init__(self.nwalkers, model.nparameters, model,
                                              pool=pool, a=a)

    def write(self, filename, ii=None):
        if ii is None:
            ii = self.chain.shape[1]

        # write the sampler data to an HDF5 file
        logger.info("Writing sampler data to '{}'...".format(filename))
        with h5py.File(filename, "w") as f:
            f["last_step"] = ii
            f["chain"] = self.chain
            f["lnprobability"] = self.lnprobability
            f["acceptance_fraction"] = self.acceptance_fraction
            try:
                f["acor"] = self.acor
            except:
                logger.warn("Failed to compute autocorrelation time.")
                f["acor"] = []

    def run_inference(self, pos, nsteps, output_every=None,
                      output_file="emcee_snapshot.txt", first_step=0):  # path=
        """ Custom run MCMC that caches the sampler every specified number of steps. """
        if output_every is None:
            output_every = nsteps

        logger.info("Running {} walkers for {} steps..."
                    .format(self.nwalkers, nsteps))
        time0 = time.time()

        ii = first_step
        for outer_loop in range(nsteps // output_every):
            self.reset()
            for results in self.sample(pos, iterations=output_every):
                ii += 1
            # TODO: need to append to file...
            # self.write(os.path.join(path,output_file_fmt.format(ii)), ii=ii)
            pos = results[0]

        # the remainder
        remainder = nsteps % output_every
        if remainder > 0:
            self.reset()
            for results in self.sample(pos, iterations=remainder):
                ii += 1
            # TODO:
            # self.write(os.path.join(path,output_file_fmt.format(ii)), ii=ii)

        t = time.time() - time0
        logger.debug("Spent {} seconds on main sampling...".format(t))
mit
-5,250,285,706,520,452,000
29.541176
86
0.555085
false
abalckin/cwavenet
examples/WNvsPWN/exm11.py
1
1906
#! /usr/bin/python3
import sys
import tool
sys.path.append('../../bin/')
import wavenet as wn
import pylab as plb
import numpy as np
from scipy.integrate import odeint
import cregister as cr
from scipy import signal


class Caller(object):
    def __call__(self, prg):
        pass


# Model system
def ksi2(t):
    return (np.sin(7*t)+np.cos(9*t))*0.1


def func1c(y, t):
    y1, y2 = y
    return [y2, -5*y2-(6+2*np.sin(0.5*t))*y1+uc(t)+ksi2(t)]


def func2c(y, t):
    y1, y2 = y
    return [y2, -(2+1*np.sin(0.5*t))*y2-5*y1+uc(t)+ksi2(t)]


def func1g(y, t):
    y1, y2 = y
    return [y2, -5*y2-(6+2*np.sin(0.5*t))*y1+ug(t)+ksi2(t)]


def func2g(y, t):
    y1, y2 = y
    return [y2, -(2+1*np.sin(0.5*t))*y2-5*y1+ug(t)+ksi2(t)]


def uc(t):
    return 0.


def ug(t):
    return 0.


cb = cr.Caller()
cal = Caller()
cb.setHandler(cal)

k2 = 0.05
N = 200
np.random.seed()
c0 = 1.0
a0 = 1.
a1 = 1.
w0 = -0.
w1 = 0.
p0 = 1.
p1 = 1.
nc = 10
znum = 2

t = np.arange(0, 10, 0.1)
eps = np.random.normal(0., k2, t.shape[-1])
inpc = np.vectorize(uc)(t)
inpc = inpc+eps*np.abs(inpc)
eps = np.random.normal(0., k2, t.shape[-1])
inpg = np.vectorize(ug)(t)
inpg = inpg+eps*np.abs(inpg)

sysag = odeint(func1g, [6., 5.], t)[:, 0]
sysac = odeint(func1c, [6., 5.], t)[:, 0]
sysbg = odeint(func2g, [6., 5.], t)[:, 0]
sysbc = odeint(func2c, [6., 5.], t)[:, 0]

ulist = [inpg, inpc, inpg, inpc]
tarlist = [sysag, sysac, sysbg, sysbc]

eps = np.random.normal(0., k2, sysbg.shape)
dg2 = sysbg+eps
#dg = sysag+eps*np.abs(sysag)

w = wn.Net(nc, c0, a0, a1, w0, w1, p0, p1, znum, 0.5, 1., wn.ActivateFunc.Morlet, 4)
track = w.train(t, t, dg2, wn.TrainStrategy.BFGS, N, 0., 1, True, True)
#track = w.train(t, t, dg, wn.TrainStrategy.BFGS, N, 0., 1, False, False)
tool.plot(t, t, w, track, orig=sysbg, target=dg2)
plb.show()
#tool.plot(t, inpg, w, track, orig=sysbg)
#plb.show()
sys.exit()
gpl-2.0
3,461,503,620,009,279,500
16.672897
80
0.586991
false
slabanja/ase
ase/lattice/triclinic.py
3
3230
"""Function-like object creating triclinic lattices. The following lattice creator is defined: Triclinic """ from ase.lattice.bravais import Bravais import numpy as np from ase.data import reference_states as _refstate class TriclinicFactory(Bravais): "A factory for creating triclinic lattices." # The name of the crystal structure in ChemicalElements xtal_name = "triclinic" # The natural basis vectors of the crystal structure int_basis = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) basis_factor = 1.0 # Converts the natural basis back to the crystallographic basis inverse_basis = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) inverse_basis_factor = 1.0 def get_lattice_constant(self): "Get the lattice constant of an element with triclinic crystal structure." if _refstate[self.atomicnumber]['symmetry'].lower() != self.xtal_name: raise ValueError, (("Cannot guess the %s lattice constant of" + " an element with crystal structure %s.") % (self.xtal_name, _refstate[self.atomicnumber]['symmetry'])) return _refstate[self.atomicnumber].copy() def make_crystal_basis(self): "Make the basis matrix for the crystal unit cell and the system unit cell." lattice = self.latticeconstant if type(lattice) == type({}): a = lattice['a'] try: b = lattice['b'] except KeyError: b = a * lattice['b/a'] try: c = lattice['c'] except KeyError: c = a * lattice['c/a'] alpha = lattice['alpha'] beta = lattice['beta'] gamma = lattice['gamma'] else: if len(lattice) == 6: (a,b,c,alpha,beta,gamma) = lattice else: raise ValueError, "Improper lattice constants for triclinic crystal." degree = np.pi / 180.0 cosa = np.cos(alpha*degree) cosb = np.cos(beta*degree) sinb = np.sin(beta*degree) cosg = np.cos(gamma*degree) sing = np.sin(gamma*degree) lattice = np.array([[a,0,0], [b*cosg, b*sing,0], [c*cosb, c*(cosa-cosb*cosg)/sing, c*np.sqrt(sinb**2 - ((cosa-cosb*cosg)/sing)**2)]]) self.latticeconstant = lattice self.miller_basis = lattice self.crystal_basis = (self.basis_factor * np.dot(self.int_basis, lattice)) self.basis = np.dot(self.directions, self.crystal_basis) assert abs(np.dot(lattice[0],lattice[1]) - a*b*cosg) < 1e-5 assert abs(np.dot(lattice[0],lattice[2]) - a*c*cosb) < 1e-5 assert abs(np.dot(lattice[1],lattice[2]) - b*c*cosa) < 1e-5 assert abs(np.dot(lattice[0],lattice[0]) - a*a) < 1e-5 assert abs(np.dot(lattice[1],lattice[1]) - b*b) < 1e-5 assert abs(np.dot(lattice[2],lattice[2]) - c*c) < 1e-5 Triclinic = TriclinicFactory()
gpl-2.0
3,716,081,925,471,218,700
37.915663
85
0.533437
false
AshivDhondea/SORADSIM
scenarios/main_057_iss_03.py
1
11176
# -*- coding: utf-8 -*- """ Created on Thu Sep 07 14:20:21 2017 Edited: 30/09/17: cleaned up the plots to include in dissertation @author: Ashiv Dhondea """ import math import numpy as np # Importing what's needed for nice plots. import matplotlib.pyplot as plt from matplotlib import rc rc('font', **{'family': 'serif', 'serif': ['Helvetica']}) rc('text', usetex=True) params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']} plt.rcParams.update(params) from mpl_toolkits.axes_grid.anchored_artists import AnchoredText # Libraries needed for time keeping and formatting import datetime as dt import pytz import aniso8601 import pandas as pd # for loading MeerKAT dishes' latlon # --------------------------------------------------------------------------- # print 'Loading MeerKAT positions' dframe = pd.read_excel("MeerKAT64v36.wgs84.64x4_edited.xlsx",sheetname="Sheet1") dframe = dframe.reset_index() meerkat_id = dframe['ID'][0:64] meerkat_lat = dframe['Lat'][0:64] meerkat_lon = dframe['Lon'][0:64] # --------------------------------------------------------------------------- # with open('main_057_iss_00_visibility.txt') as fp: for line in fp: if 'visibility interval in Tx' in line: good_index = line.index('=') visibility_interval = line[good_index+1:-1]; good_index = visibility_interval.index('/'); start_timestring=visibility_interval[:good_index]; end_timestring = visibility_interval[good_index+1:]; fp.close(); # --------------------------------------------------------------------------- # print 'Loading data' timevec = np.load('main_057_iss_02_timevec.npy'); # timevector y_sph_rx = np.load('main_057_iss_02_y_sph_rx.npy'); # spherical measurement vectors in Rx frame y_sph_tx = np.load('main_057_iss_02_y_sph_tx.npy'); # spherical measurement vectors in Tx frame y_sph_rx_meerkat_01 = np.load('main_057_iss_02_y_sph_rx_meerkat_01.npy'); y_sph_rx_meerkat_02 = np.load('main_057_iss_02_y_sph_rx_meerkat_02.npy'); # discretization step length/PRF delta_t = timevec[2]-timevec[1]; # time stamps experiment_timestamps = [None]*len(timevec) index=0; with open('main_057_iss_02_experiment_timestamps.txt') as fp: for line in fp: modified_timestring = line[:-1]; experiment_timestamps[index] = aniso8601.parse_datetime(modified_timestring); index+=1; fp.close(); experiment_timestamps[0] = experiment_timestamps[0].replace(tzinfo=None) experiment_timestamps[-1] = experiment_timestamps[-1].replace(tzinfo=None) title_string = str(experiment_timestamps[0].isoformat())+'Z/'+str(experiment_timestamps[-1].isoformat())+'Z'; norad_id = '25544' # --------------------------------------------------------------------------- # time_index = np.load('main_057_iss_02_time_index.npy'); tx_el_min_index = time_index[0]; tx_el_max_index = time_index[1]; # --------------------------------------------------------------------------- # ## Bistatic Radar characteristics rx_az_min = 0.; # [deg] rx_az_max = 360.; # [deg] rx_el_min = 15.; # [deg] rx_el_max = 88.; # [deg] # --------------------------------------------------------------------------- # # M000 rx_el_min_range = np.where( y_sph_rx[1,tx_el_min_index:tx_el_max_index+1] >= math.radians(rx_el_min)) rx_el_min_index = rx_el_min_range[0][0] + tx_el_min_index; rx_el_max_range = np.where( y_sph_rx[1,rx_el_min_index:tx_el_max_index+1] >= math.radians(rx_el_min)) # 06/09/17: debugged rx_el_max_index = rx_el_max_range[0][-1]+rx_el_min_index; # M001 rx_el_min_range_meerkat_01 = np.where( y_sph_rx_meerkat_01[1,tx_el_min_index:tx_el_max_index+1] >= 
math.radians(rx_el_min)) rx_el_min_index_meerkat_01 = rx_el_min_range_meerkat_01[0][0] + tx_el_min_index; rx_el_max_range_meerkat_01 = np.where( y_sph_rx_meerkat_01[1,rx_el_min_index_meerkat_01:tx_el_max_index+1] >= math.radians(rx_el_min))# 06/09/17: debugged rx_el_max_index_meerkat_01 = rx_el_max_range_meerkat_01[0][-1]+rx_el_min_index_meerkat_01; # M002 rx_el_min_range_meerkat_02 = np.where( y_sph_rx_meerkat_02[1,tx_el_min_index:tx_el_max_index+1] >= math.radians(rx_el_min)) rx_el_min_index_meerkat_02 = rx_el_min_range_meerkat_02[0][0] + tx_el_min_index; rx_el_max_range_meerkat_02 = np.where( y_sph_rx_meerkat_02[1,rx_el_min_index_meerkat_02:tx_el_max_index+1] >= math.radians(rx_el_min))# 06/09/17: debugged rx_el_max_index_meerkat_02 = rx_el_max_range_meerkat_02[0][-1]+rx_el_min_index_meerkat_02; # --------------------------------------------------------------------------- # # Bounds time_index_rx = np.zeros([2],dtype=np.int64); time_index_rx[0] = max(rx_el_min_index,rx_el_min_index_meerkat_01,rx_el_min_index_meerkat_02); time_index_rx[1] = min(rx_el_max_index,rx_el_max_index_meerkat_01,rx_el_max_index_meerkat_02); print 'bounds for Rx FoR' print time_index_rx[0] print time_index_rx[1] np.save('main_057_iss_03_time_index_rx.npy',time_index_rx); print 'plotting results' # --------------------------------------------------------------------------- # """ # Plot bistatic geometry results f, axarr = plt.subplots(6,sharex=True); plt.rc('text', usetex=True) plt.rc('font', family='serif'); f.suptitle(r"\textbf{Look angles at Rx0, Rx1 \& Rx2 to object %s trajectory for %s}" %(norad_id,title_string) ,fontsize=12,y=1.01) axarr[0].set_title(r'Elevation angle $\theta_{\text{Rx0}}~[\mathrm{^\circ}]$') axarr[0].set_ylabel(r'$\theta_{\text{Rx}}$'); axarr[0].plot(timevec,np.rad2deg(y_sph_rx[1,:])); del_el_ticks=30; el_ticks_range=np.arange(15,90+del_el_ticks,del_el_ticks,dtype=np.int64) axarr[0].set_yticks(el_ticks_range); axarr[0].axhline(rx_el_min,color='darkgreen',linestyle='dashed'); axarr[0].axhline(rx_el_max,color='darkgreen',linestyle='dashed'); #axarr[0].axvspan(timevec[rx_el_min_index],timevec[rx_el_max_index],facecolor='yellow',alpha=0.2); del_az_ticks=40; az_ticks_range=np.arange(-120,40+del_az_ticks,del_az_ticks,dtype=np.int64) axarr[1].set_yticks(az_ticks_range); axarr[1].set_title(r'Azimuth angle $\psi_{\text{Rx0}}~[\mathrm{^\circ}]$') axarr[1].set_ylabel(r'$\psi_{\text{Rx}}$'); axarr[1].plot(timevec,np.rad2deg(y_sph_rx[2,:])); axarr[2].set_title(r'Elevation angle $\theta_{\text{Rx1}}~[\mathrm{^\circ}]$') axarr[2].set_ylabel(r'$\theta_{\text{Rx}}$'); axarr[2].plot(timevec,np.rad2deg(y_sph_rx_meerkat_01[1,:])); axarr[2].set_yticks(el_ticks_range); axarr[2].axhline(rx_el_min,color='darkgreen',linestyle='dashed'); axarr[2].axhline(rx_el_max,color='darkgreen',linestyle='dashed'); #axarr[2].axvspan(timevec[rx_el_min_index],timevec[rx_el_max_index],facecolor='yellow',alpha=0.2); axarr[3].set_title(r'Azimuth angle $\psi_{\text{Rx1}}~[\mathrm{^\circ}]$') axarr[3].set_yticks(az_ticks_range); axarr[3].set_ylabel(r'$\psi_{\text{Rx}}$'); axarr[3].plot(timevec,np.rad2deg(y_sph_rx_meerkat_01[2,:])); axarr[4].set_title(r'Elevation angle $\theta_{\text{Rx2}}~[\mathrm{^\circ}]$') axarr[4].set_ylabel(r'$\theta_{\text{Rx}}$'); axarr[4].set_yticks(el_ticks_range); axarr[4].plot(timevec,np.rad2deg(y_sph_rx_meerkat_02[1,:])); axarr[4].axhline(rx_el_min,color='darkgreen',linestyle='dashed'); axarr[4].axhline(rx_el_max,color='darkgreen',linestyle='dashed'); 
#axarr[4].axvspan(timevec[rx_el_min_index],timevec[rx_el_max_index],facecolor='yellow',alpha=0.2); axarr[5].set_title(r'Azimuth angle $\psi_{\text{Rx2}}~[\mathrm{^\circ}]$') axarr[5].set_ylabel(r'$\psi_{\text{Rx}}$'); axarr[5].set_yticks(az_ticks_range); axarr[5].plot(timevec,np.rad2deg(y_sph_rx_meerkat_02[2,:])); axarr[5].set_xlabel(r'Time $t~[\mathrm{s}]$'); axarr[0].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[1].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[2].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[3].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[4].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[5].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4) at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2") axarr[5].add_artist(at) # Fine-tune figure; hide x ticks for top plots and y ticks for right plots plt.setp([a.get_xticklabels() for a in axarr[0:5]], visible=False) plt.subplots_adjust(hspace=0.6) f.savefig('main_057_iss_03_rxangles.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10) """ # --------------------------------------------------------------------------- # start_vis_epoch = experiment_timestamps[rx_el_min_index]; start_vis_epoch = start_vis_epoch.replace(tzinfo=None); end_vis_epoch = experiment_timestamps[rx_el_max_index]; end_vis_epoch = end_vis_epoch .replace(tzinfo=None); f, axarr = plt.subplots(2,sharex=True); plt.rc('text', usetex=True) plt.rc('font', family='serif'); f.suptitle(r"\textbf{Look angles at Rx0 to object %s trajectory for %s}" %(norad_id,title_string) ,fontsize=12,y=1.01) axarr[0].set_title(r'Elevation angle $\theta_{\text{Rx0}}~[\mathrm{^\circ}]$') axarr[0].set_ylabel(r'$\theta_{\text{Rx0}}$'); axarr[0].plot(timevec,np.rad2deg(y_sph_rx[1,:])); #del_el_ticks=30; #el_ticks_range=np.arange(15,90+del_el_ticks,del_el_ticks,dtype=np.int64) #axarr[0].set_yticks(el_ticks_range); axarr[0].scatter(timevec[rx_el_min_index],math.degrees(y_sph_rx[1,rx_el_min_index]),s=50,marker=r"$\Box$",facecolors='none', edgecolors='crimson',label=r"%s" %str(start_vis_epoch.isoformat()+'Z')); axarr[0].scatter(timevec[rx_el_max_index],math.degrees(y_sph_rx[1,rx_el_max_index]),s=50,marker=r"$\circledcirc$",facecolors='none', edgecolors='purple',label=r"%s" %str(end_vis_epoch.isoformat()+'Z')); axarr[0].legend(loc='center left',title=r"\textbf{Timestamps}",bbox_to_anchor=(1, 0.5),fancybox=True, shadow=True) axarr[0].axhline(rx_el_min,color='darkgreen',linestyle='dashed'); axarr[0].axhline(rx_el_max,color='darkgreen',linestyle='dashed'); axarr[0].axvspan(timevec[rx_el_min_index],timevec[rx_el_max_index],facecolor='green',alpha=0.2); #del_az_ticks=40; #az_ticks_range=np.arange(-120,40+del_az_ticks,del_az_ticks,dtype=np.int64) #axarr[1].set_yticks(az_ticks_range); axarr[1].set_title(r'Azimuth angle $\psi_{\text{Rx0}}~[\mathrm{^\circ}]$') axarr[1].set_ylabel(r'$\psi_{\text{Rx0}}$'); axarr[1].plot(timevec,np.rad2deg(y_sph_rx[2,:])); axarr[1].set_xlabel(r'Time $t~[\mathrm{s}]$'); axarr[0].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') axarr[1].grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') at = AnchoredText(r"$\Delta_t = %f ~\mathrm{s}$" %delta_t,prop=dict(size=6), frameon=True,loc=4) at.patch.set_boxstyle("round,pad=0.05,rounding_size=0.2") axarr[1].add_artist(at) # Fine-tune figure; hide 
x ticks for top plots and y ticks for right plots plt.setp([a.get_xticklabels() for a in axarr[0:1]], visible=False) #plt.subplots_adjust(hspace=0.6) f.savefig('main_057_iss_03_rx0.pdf',bbox_inches='tight',pad_inches=0.05,dpi=10)
mit
-965,847,633,271,555,000
49.275229
202
0.634395
false
HipSTR-Tool/HipSTR
test/plot_stutter_results.py
4
5142
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from matplotlib.ticker import MaxNLocator import math import sys def plot_dataset(datasets, title, pp): sort_keys = [lambda x: (x[0][3:6], x[0][6:], x[0][0:3])] #sort_keys = [lambda x: (x[0][4:7], x[0][0], x[0][1], x[0][3], x[0][2])] orig_labels = [r"$log_{10}\ \mu$", r"$\beta$", r"$\rho_{m}$", r"$n_{samples}$" , r"$\rho_{s}$", r"$d$", r"$u$"] plot_vals = [True, True, True, True, True, True, True] plot_dists = [False, False, False, False, True, True, True] value_idx = [-1, -1, -1, -1, 0, 1, 2] for param in xrange(1): items = sorted(datasets.items(), key = sort_keys[param]) keys = map(lambda x: x[0], items) fig, axes = plt.subplots(sum(plot_vals), 1, sharex=True, sharey=False) positions = range(len(datasets.keys())) subplot_id = 0 for i in xrange(7): print(i) if not plot_vals[i]: continue if plot_dists[i]: val_sets = map(lambda x: map(lambda y: y[value_idx[i]], x[1]), items) boxes = axes[subplot_id].boxplot(val_sets, positions=positions, sym='') axes[subplot_id].set_ylabel(orig_labels[i]) for line in boxes['whiskers']: line.set_linestyle('-') prev_x_start = -0.25 prev_key = keys[0][i] for j in xrange(len(keys)): if keys[j][i] != prev_key: axes[subplot_id].plot([prev_x_start, (j-0.75)], [prev_key, prev_key], color='g') prev_x_start = j-0.25 prev_key = keys[j][i] axes[subplot_id].plot([prev_x_start, (len(keys)-0.75)], [prev_key, prev_key], color='g') else: vals = map(lambda x: x[0][i], items) axes[subplot_id].set_ylabel(orig_labels[i]) pad = 0.25 prev_x_start = -pad prev_val = vals[0] for j in xrange(len(vals)): if vals[j] != prev_val: axes[subplot_id].plot([prev_x_start, (j-1+pad)], [prev_val, prev_val], color='g', linewidth=4, solid_capstyle='butt') prev_x_start = j-pad prev_val = vals[j] axes[subplot_id].plot([prev_x_start, (len(keys)-1+pad)], [prev_val, prev_val], color='g', linewidth=4, solid_capstyle='butt') axes[subplot_id].tick_params(axis='both', which='major', labelsize=6) subplot_id += 1 axes[0].set_title(title) axes[-1].xaxis.set_ticklabels([]) axes[-1].set_xlabel("Simulation scenario") axes[0].set_ylim((-5.5,-1.0)) axes[1].set_ylim((-0.05, 0.75)) axes[2].set_ylim((0.55, 1.10)) axes[3].set_ylim((50, 550)) axes[4].set_ylim((0.5, 1.0)) axes[5].set_ylim((-0.03, 0.5)) axes[6].set_ylim((-0.04, 0.5)) map(lambda x: x.xaxis.set_ticks_position('none'), axes) map(lambda x: x.yaxis.set_ticks_position('left'), axes) for ax in axes: ax.yaxis.set_major_locator(MaxNLocator(nbins=7)) pp.savefig(fig) plt.close(fig) def main(): input_file = sys.argv[1] output_file = sys.argv[2] data = open(input_file, "r") datasets = {} exc_count = 0 genotyper_indices = {} for line in data: if "EM_FAILED_TO_CONVERGE" in line: continue tokens = line.strip().split() mu, beta, p_geom = map(float, tokens[0:3]) pg_stutter,down,up = map(float, tokens[3:6]) est_pg,est_down,est_up = map(float, tokens[10:13]) mu = math.log10(mu) nsamps = int(tokens[7]) read_counts = tokens[8] #read_percents = tokens[18] haploid = tokens[6] phase_freq = tokens[9] key_one = (read_counts, haploid, phase_freq) if key_one not in datasets: datasets[key_one] = {} key_two = (mu, beta, p_geom, nsamps, pg_stutter, down, up) if key_two in datasets[key_one]: datasets[key_one][key_two].append([est_pg, est_down, est_up]) else: datasets[key_one][key_two] = [[est_pg, est_down, est_up]] data.close() print("Excluding %d results to due to convergence failure messages"%(exc_count)) pp = PdfPages(output_file) for key,dataset in 
sorted(datasets.items(), key=lambda x: (int(x[0][0].split(",")[0]), x[0][1:])): print(key) title = "Read Counts = " + key[0] if key[1] == "True": title += ", Haploid" else: title += ", Diploid" title += ", %% Phased Reads = %.1f"%(100.0*float(key[2])) if (key[1] == "True") and key[2] != "0.01": continue plot_dataset(dataset, title, pp) pp.close() if __name__ == "__main__": main()
gpl-2.0
-6,436,846,124,504,014,000
35.728571
141
0.5
false
sbtlaarzc/vispy
examples/basics/visuals/image_transforms.py
17
4425
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# vispy: gallery 2

"""
Simple demonstration of ImageVisual.
"""

import numpy as np
import vispy.app
from vispy import gloo
from vispy import visuals
from vispy.visuals.transforms import (MatrixTransform, STTransform,
                                      arg_to_array, LogTransform,
                                      PolarTransform, BaseTransform)

from image_visual import get_image


class Canvas(vispy.app.Canvas):
    def __init__(self):
        vispy.app.Canvas.__init__(self, keys='interactive', size=(800, 800))

        # Create 4 copies of an image to be displayed with different transforms
        image = get_image()
        self.images = [visuals.ImageVisual(image, method='impostor')
                       for i in range(4)]

        # Transform all images to a standard size / location (because
        # get_image() might return unexpected sizes)
        s = 100. / max(self.images[0].size)
        tx = 0.5 * (100 - (self.images[0].size[0] * s))
        ty = 0.5 * (100 - (self.images[0].size[1] * s))
        base_tr = STTransform(scale=(s, s), translate=(tx, ty))

        self.images[0].transform = (STTransform(scale=(30, 30),
                                                translate=(600, 600)) *
                                    SineTransform() *
                                    STTransform(scale=(0.1, 0.1),
                                                translate=(-5, -5)) *
                                    base_tr)

        tr = MatrixTransform()
        tr.rotate(40, (0, 0, 1))
        tr.rotate(30, (1, 0, 0))
        tr.translate((0, -20, -60))

        p = MatrixTransform()
        p.set_perspective(0.5, 1, 0.1, 1000)
        tr = p * tr

        tr1 = (STTransform(translate=(200, 600)) *
               tr *
               STTransform(translate=(-50, -50)) *
               base_tr)
        self.images[1].transform = tr1

        tr2 = (STTransform(scale=(3, -100),
                           translate=(200, 50)) *
               LogTransform((0, 2, 0)) *
               STTransform(scale=(1, -0.01),
                           translate=(-50, 1.1)) *
               base_tr)
        self.images[2].transform = tr2

        tr3 = (STTransform(scale=(400, 400),
                           translate=(570, 400)) *
               PolarTransform() *
               STTransform(scale=(np.pi/150, -0.005),
                           translate=(-3.3*np.pi/4., 0.7)) *
               base_tr)
        self.images[3].transform = tr3

        text = visuals.TextVisual(
            text=['logarithmic', 'polar', 'perspective', 'custom (sine)'],
            pos=[(100, 20), (500, 20), (100, 410), (500, 410)],
            color='k', font_size=16)

        self.visuals = self.images + [text]
        self.show()

    def on_draw(self, ev):
        gloo.clear(color='w', depth=True)
        for vis in self.visuals:
            vis.draw()

    def on_resize(self, event):
        # Set canvas viewport and reconfigure visual transforms to match.
        vp = (0, 0, self.physical_size[0], self.physical_size[1])
        self.context.set_viewport(*vp)
        for vis in self.visuals:
            vis.transforms.configure(canvas=self, viewport=vp)


# A simple custom Transform
class SineTransform(BaseTransform):
    """
    Add sine wave to y-value for wavy effect.
    """
    glsl_map = """
        vec4 sineTransform(vec4 pos) {
            return vec4(pos.x, pos.y + sin(pos.x), pos.z, 1);
        }"""

    glsl_imap = """
        vec4 sineTransform(vec4 pos) {
            return vec4(pos.x, pos.y - sin(pos.x), pos.z, 1);
        }"""

    Linear = False

    @arg_to_array
    def map(self, coords):
        ret = coords.copy()
        ret[..., 1] += np.sin(ret[..., 0])
        return ret

    @arg_to_array
    def imap(self, coords):
        ret = coords.copy()
        ret[..., 1] -= np.sin(ret[..., 0])
        return ret

    def inverse(self):
        return InvSineTransform()


class InvSineTransform(BaseTransform):
    glsl_map = SineTransform.glsl_imap
    glsl_imap = SineTransform.glsl_map

    Linear = False

    map = SineTransform.imap
    imap = SineTransform.map

    def inverse(self):
        return SineTransform()


if __name__ == '__main__':
    win = Canvas()
    import sys
    if sys.flags.interactive != 1:
        vispy.app.run()
bsd-3-clause
7,830,507,639,976,076,000
30.161972
79
0.519548
false
GiulioRossetti/ndlib
setup.py
1
2071
from setuptools import setup, find_packages
from codecs import open
from os import path

__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(name='ndlib',
      version='5.1.1',
      license='BSD-2-Clause',
      description='Network Diffusion Library',
      url='https://github.com/GiulioRossetti/ndlib',
      author='Giulio Rossetti',
      author_email='giulio.rossetti@gmail.com',
      use_2to3=True,
      entry_points={
          'console_scripts': [
              'NDQL_translate = scripts.NDQL_translate:translate',
              'NDQL_execute = scripts.NDQL_execute:execute'
          ],
      },
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 5 - Production/Stable',

          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',

          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: BSD License',
          "Operating System :: OS Independent",

          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python',
          'Programming Language :: Python :: 3'
      ],
      keywords='epidemics opinion-dynamics simulator complex-networks',
      long_description=long_description,
      long_description_content_type='text/markdown',
      install_requires=['netdispatch', 'python-igraph', 'numpy', 'networkx', 'dynetx',
                        'scipy', 'bokeh', 'future'],
      packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test",
                                      "ndlib.test", "ndlib.test.*"]),
      )
bsd-2-clause
1,381,096,617,826,774,800
35.982143
119
0.613713
false
MichSchli/QuestionAnsweringGCN
old_version/candidate_selection/tensorflow_models/components/graph_encoders/hypergraph_gcn_propagation_unit.py
1
7838
from candidate_selection.tensorflow_models.components.abstract_component import AbstractComponent from candidate_selection.tensorflow_models.components.graph_encoders.gcn_message_passer import GcnMessagePasser import numpy as np import tensorflow as tf class HypergraphGcnPropagationUnit(AbstractComponent): self_weight_type = None self_bias_type = None def __init__(self, prefix, number_of_relation_types, variables, dimension, hypergraph, weights="block", biases="constant", self_weight="full", self_bias="constant", add_inverse_relations=True, gate_mode="none", gate_input_dim=1): self.add_inverse_relations = add_inverse_relations self.hypergraph = hypergraph self.dimension = dimension self.variable_prefix = prefix self.self_weight_type = self_weight self.self_bias_type = self_bias def get_optimizable_parameters(self): params = [self.W_self_entities, self.W_self_events] params += self.gcn_encoder_ev_to_en.get_optimizable_parameters() params += self.gcn_encoder_en_to_ev.get_optimizable_parameters() params += self.gcn_encoder_en_to_en.get_optimizable_parameters() params += self.gcn_encoder_ev_to_en_invert.get_optimizable_parameters() params += self.gcn_encoder_en_to_ev_invert.get_optimizable_parameters() params += self.gcn_encoder_en_to_en_invert.get_optimizable_parameters() return params def handle_variable_assignment(self, batch_dict, mode): pass def get_regularization_term(self): reg = self.gcn_encoder_ev_to_en.get_regularization_term() reg += self.gcn_encoder_en_to_ev.get_regularization_term() reg += self.gcn_encoder_en_to_en.get_regularization_term() if self.add_inverse_relations: reg += self.gcn_encoder_ev_to_en_invert.get_regularization_term() reg += self.gcn_encoder_en_to_ev_invert.get_regularization_term() reg += self.gcn_encoder_en_to_en_invert.get_regularization_term() return reg def set_gate_features(self, features, type): if type == "entities": self.gcn_encoder_en_to_ev.set_gate_features(features) self.gcn_encoder_en_to_en.set_gate_features(features) if self.add_inverse_relations: self.gcn_encoder_en_to_ev_invert.set_gate_features(features) self.gcn_encoder_en_to_en_invert.set_gate_features(features) elif type == "events": self.gcn_encoder_ev_to_en.set_gate_features(features) if self.add_inverse_relations: self.gcn_encoder_ev_to_en_invert.set_gate_features(features) def set_gate_key(self, key): self.gcn_encoder_en_to_ev.set_gate_key(key) self.gcn_encoder_en_to_en.set_gate_key(key) self.gcn_encoder_ev_to_en.set_gate_key(key) if self.add_inverse_relations: self.gcn_encoder_en_to_ev_invert.set_gate_key(key) self.gcn_encoder_en_to_en_invert.set_gate_key(key) self.gcn_encoder_ev_to_en_invert.set_gate_key(key) def prepare_tensorflow_variables(self, mode="train"): self.gcn_encoder_ev_to_en.prepare_variables() self.gcn_encoder_en_to_ev.prepare_variables() self.gcn_encoder_en_to_en.prepare_variables() if self.add_inverse_relations: self.gcn_encoder_ev_to_en_invert.prepare_variables() self.gcn_encoder_en_to_ev_invert.prepare_variables() self.gcn_encoder_en_to_en_invert.prepare_variables() initializer_event_weight = np.random.normal(0, 0.01, size=(self.dimension, self.dimension)).astype( np.float32) self.W_events = tf.Variable(initializer_event_weight, name=self.variable_prefix + "event_transform_weights") self.b_events = tf.Variable(np.zeros(self.dimension).astype(np.float32), name=self.variable_prefix + "event_transform_bias") initializer_event_weight_2 = np.random.normal(0, 0.01, size=(self.dimension, self.dimension)).astype( np.float32) self.W_events_2 = 
tf.Variable(initializer_event_weight_2, name=self.variable_prefix + "event_transform_weights_2") self.b_events_2 = tf.Variable(np.zeros(self.dimension).astype(np.float32), name=self.variable_prefix + "event_transform_bias_2") if self.self_weight_type == "full": initializer_v = np.random.normal(0, 0.01, size=(self.dimension, self.dimension)).astype( np.float32) self.W_self_entities = tf.Variable(initializer_v, name=self.variable_prefix + "self_entitity_weights") initializer_e = np.random.normal(0, 0.01, size=(self.dimension, self.dimension)).astype( np.float32) self.W_self_events = tf.Variable(initializer_e, name=self.variable_prefix + "self_event_weights") if self.self_bias_type == "constant": self.b_self_entities = tf.Variable(np.zeros(self.dimension).astype(np.float32), name=self.variable_prefix + "self_entitity_bias") self.b_self_events = tf.Variable(np.zeros(self.dimension).astype(np.float32), name=self.variable_prefix + "self_event_bias") def get_edge_gates(self): edge_gates = [None]*6 edge_gates[0] = self.gcn_encoder_en_to_ev.get_edge_gates() edge_gates[1] = self.gcn_encoder_ev_to_en.get_edge_gates() edge_gates[2] = self.gcn_encoder_en_to_en.get_edge_gates() edge_gates[3] = self.gcn_encoder_en_to_ev_invert.get_edge_gates() edge_gates[4] = self.gcn_encoder_ev_to_en_invert.get_edge_gates() edge_gates[5] = self.gcn_encoder_en_to_en_invert.get_edge_gates() return edge_gates def propagate(self): # Propagate information to events: # For now apply no self transform to events if self.self_weight_type == "full": event_self_loop_messages = tf.matmul(self.hypergraph.event_vertex_embeddings, self.W_self_events) if self.self_bias_type == "constant": event_self_loop_messages += self.b_self_events self.hypergraph.event_vertex_embeddings = self.gcn_encoder_en_to_ev.get_update(self.hypergraph) if self.add_inverse_relations: self.hypergraph.event_vertex_embeddings += self.gcn_encoder_en_to_ev_invert.get_update(self.hypergraph) self.hypergraph.event_vertex_embeddings = tf.matmul(self.hypergraph.event_vertex_embeddings, self.W_events) self.hypergraph.event_vertex_embeddings += self.b_events self.hypergraph.event_vertex_embeddings = tf.nn.relu(self.hypergraph.event_vertex_embeddings) self.hypergraph.event_vertex_embeddings = tf.matmul(self.hypergraph.event_vertex_embeddings, self.W_events_2) self.hypergraph.event_vertex_embeddings += self.b_events_2 # Propagate information to vertices: if self.self_weight_type == "full": self_loop_messages = tf.matmul(self.hypergraph.entity_vertex_embeddings, self.W_self_entities) else: self_loop_messages = self.hypergraph.entity_vertex_embeddings if self.self_bias_type == "constant": self_loop_messages += self.b_self_entities entity_vertex_embeddings = self.gcn_encoder_ev_to_en.get_update(self.hypergraph) if self.add_inverse_relations: entity_vertex_embeddings += self.gcn_encoder_ev_to_en_invert.get_update(self.hypergraph) entity_vertex_embeddings += self.gcn_encoder_en_to_en.get_update(self.hypergraph) if self.add_inverse_relations: entity_vertex_embeddings += self.gcn_encoder_en_to_en_invert.get_update(self.hypergraph) self.hypergraph.entity_vertex_embeddings = entity_vertex_embeddings + self_loop_messages
mit
3,995,360,640,255,091,000
48.923567
159
0.664838
false
EconForge/dolo
dolo/numeric/distribution.py
1
20938
## Useful Links ## Common probability distributions: ## https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/ ## ## Distributions.jl:(args, kwargs)) ## https://juliastats.github.io/Distributions.jl/stable/ ## ## Scipy.stats: ## https://docs.scipy.org/doc/scipy-0.17.1/reference/stats.html ## ## Quantecon/rvlib list: ## https://github.com/QuantEcon/rvlib/tree/multivariate ## ## Quantecon/rvlib univarite: ## https://github.com/QuantEcon/rvlib/blob/multivariate/rvlib/univariate.py ## ## Hark/utilities.py: (all univariate?) ## https://github.com/econ-ark/HARK/blob/d99393973554b1cf830c6285e6da59d98ff242ff/HARK/utilities.py ## ## Dolo processes.py: ## https://github.com/EconForge/dolo/blob/master/dolo/numeric/processes.py ## ## Dolo processes.jl: ## https://github.com/EconForge/Dolo.jl/blob/master/src/numeric/processes.jl ## This code # Here we have the list of classes implemented by Rvlib # They only have Mv normal in multivariate (add at least log-normal) # Do we sepearte mv and univariate? # If not create a dict with small and capital letters denoting uni and mv cases # Then accept both and convert automatically to do operations for mv case ? # For which cases we have mv? mostly Normal/UNormal # Parameter names are (so far) used as they appear in Distributions.jl # This seems like the richest source for distributions with very clear documentation import numpy as np # type: ignore from scipy.stats import norm, uniform, lognorm, beta # type: ignore from matplotlib import pyplot as plt # type: ignore import numpy as np # type: ignore from dataclasses import dataclass # type: ignore from typing import List, TypeVar, Generic, Union, Any, Callable # type: ignore from typing import Iterator, Tuple # type: ignore from dolang.language import greek_tolerance, language_element # type: ignore from dolo.numeric.processes import IIDProcess, DiscretizedIIDProcess # type: ignore Vector = List[float] Matrix = List[Vector] T = TypeVar("T") class Distribution(IIDProcess): """ A multivariate distribution. Attributes: d(int): number of dimensions. names(list[str], optional): variable names """ d: int # number of dimensions names: Union[None, Tuple[str, ...]] # names of variables (optional) def draw(self, N: int) -> Matrix: "Compute `N` random draws. Returns an `N` times `d` matrix." raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) def integrate(self, f) -> float: "Computes the expectation $E_u f(u)$ for given function `f`" raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) ### ### Continuous Distributions ### class ContinuousDistribution(Distribution): def discretize(self, **kwargs): # ->DiscreteDistribution: raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) ### ### Discrete Distributions ### class DiscreteDistribution(Distribution, DiscretizedIIDProcess): """ A multivariate discrete distribution. Attributes: d(int): number of dimensions. names(list[str], optional): variable names n(int): number of discretization points origin(distribution, optional): distribution that was discretized """ n: int # number of discretization points origin: Union[None, ContinuousDistribution] def point(self, i) -> Vector: "Returns i-th discretization point (a Vector)" raise Exception( f"Not Implemented (yet). 
Should be implemented by subclass {self.__class__.name}" ) def weight(self, i) -> float: "Returns i-th discretization point (a float)" raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) def items(self) -> Iterator[Tuple[float, Vector]]: """Returns a generator yielding all points and weights. Example: sum( [ w*f(x) for (w,x) in discrete_dist.items() ] ) """ return ((self.weight(i), self.point(i)) for i in range(self.n)) def integrate(self, fun: Callable[[Vector], T]) -> T: # alread documented by the ancestor return sum(w * fun(x) for (w, x) in self.items()) class EquiprobableDistribution(DiscreteDistribution): points: Vector def __init__(self, points: Vector = None, origin: Union[Distribution, None] = None): n, d = points.shape self.d = d self.n = n self.points = points self.origin = origin @property def weights(self) -> Vector: # so that it can behave like a FiniteDistribution (notably for graphs) w = np.ones(self.n) w /= self.n return w def point(self, i) -> float: return self.points[i, :] def weight(self, i) -> float: return 1 / self.n def draw(self, N: int) -> Matrix: import numpy.random inds = numpy.random.randint(low=0, high=self.n, size=N) return self.points[inds, :] def __repr__(self): return f"EquiprobableDistribution(points={self.points.__repr__()}, origin={str(self.origin)})" def __str__(self): return f"EquiprobableDistribution(points={self.points}, origin={self.origin})" # Special kind of Discrete distributions characterized # by a list of points and a list of weights. class FiniteDistribution(DiscreteDistribution): points: Vector weights: Vector def __init__( self, points: Vector = None, weights: Vector = None, origin: Union[Distribution, None] = None, ): n, d = points.shape self.d = d self.n = n assert len(weights) == n self.points = points self.weights = weights self.origin = origin def draw(self, N: int) -> Matrix: import numpy.random choices = numpy.random.choice(range(self.n), size=N, p=self.weights) return self.points[choices, :] def point(self, i) -> float: return self.points[i, :] def weight(self, i) -> float: return self.weights[i] def __repr__(self): return f"FiniteDistribution(points={self.points.__repr__()}, weights={self.weights.__repr__()}, origin={str(self.origin)})" def __str__(self): return f"FiniteDistribution(points={self.points}, weights={self.weights}, origin={self.origin})" def product_iid(iids: List[FiniteDistribution]) -> FiniteDistribution: from dolo.numeric.misc import cartesian nn = [len(f.weights) for f in iids] cart = cartesian([range(e) for e in nn]) nodes = np.concatenate( [f.points[cart[:, i], :] for i, f in enumerate(iids)], axis=1 ) weights = iids[0].weights for f in iids[1:]: weights = np.kron(weights, f.weights) return FiniteDistribution(nodes, weights) ### ### Discrete Distributions ### @language_element ##@dataclass class Bernouilli(DiscreteDistribution): π: float = 0.5 signature = {"π": "float"} # this is redundant for now @greek_tolerance def __init__(self, π: float = None): self.π = float(π) def discretize(self, to="iid"): if to != "iid": raise Exception("Not implemented (yet).") x = np.array([[0], [1]]) w = np.array([1 - self.π, self.π]) return FiniteDistribution(x, w) def draw(self, N: int) -> Matrix: a = np.array([0, 1]) c = np.random.choice(a, size=N) return c.reshape((N, 1)) @language_element ##@dataclass class Binomial(DiscreteDistribution): π: float = 0.5 n: int # TODO ### ### 1d Continuous Distributions ### class UnivariateContinuousDistribution(ContinuousDistribution): """ A 
univariate distribution. Attributes: d(int): number of dimensions. names(list[str], optional): variable names """ d = 1 def ppf(self, quantiles: Vector) -> Vector: "Percentage Point Function (inverse CDF)" raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) def cdf(self, quantiles: Vector) -> Vector: "Cumulative Distribution" raise Exception( f"Not Implemented (yet). Should be implemented by subclass {self.__class__.name}" ) def discretize(self, to="iid", N=5, method="equiprobable", mass_point="median"): """ Returns a discretized version of this process. Parameters ---------- N : int Number of point masses in the discretized distribution. method : str 'equiprobable' or 'gauss-hermite' mass_point : str 'median', 'left', 'middle', or 'right' Returns: ------------ process : DiscreteDistribution A discrete distribution. """ if method == "gauss-hermite": return self.__discretize_gh__(N=N) elif method == "equiprobable": return self.__discretize_ep__(N=N, mass_point=mass_point) else: raise Exception("Unknown discretization method.") if to != "iid": raise Exception("Not implemented (yet).") def __discretize_ep__(self, N=5, mass_point="median"): # Equiprobable if mass_point == "median": p = np.linspace(0.5 / N, 1 - 0.5 / N, N) q = self.ppf(p) elif mass_point == "left": p = np.linspace(0, 1 - 1 / N, N) q = self.ppf(p) elif mass_point == "middle": p = np.linspace(0.0, 1, N + 1) q = self.ppf(p) q = 0.5 * (q[1:] + q[:-1]) elif mass_point == "right": p = np.linspace(1 / N, 1, N) q = self.ppf(p) else: raise Exception("Not implemented") w = (1 / (N)) * np.ones(N) return EquiprobableDistribution(q[:, None], origin=self) @language_element ##@dataclass class UNormal(UnivariateContinuousDistribution): μ: float = 0.0 σ: float = 1.0 signature = {"μ": "Optional[float]", "σ": "float"} # this is redundant for now @greek_tolerance def __init__(self, σ: float = None, μ: float = None): self.σ = float(σ) self.μ = 0.0 if μ is None else float(μ) def ppf(self, quantiles): x = norm.ppf(quantiles, loc=self.μ, scale=self.σ) return x def cdf(self, x): p = norm.cdf(x, loc=self.μ, scale=self.σ) return p def draw(self, N): from numpy.random import multivariate_normal Sigma = np.array([[self.σ ** 2]]) mu = np.array([self.μ]) sim = multivariate_normal(mu, Sigma, size=N) return sim.reshape((N, 1)) def integrate(self, fun) -> float: # I don't think anybody should use that. 
It's just an example σ = self.σ μ = self.μ f = ( lambda x: fun(x) / σ / np.sqrt(2 * np.pi) * np.exp(-1 / 2 * ((x - μ) / σ) ** 2) ) import scipy.integrate return scipy.integrate.quad(f, -np.Inf, np.Inf)[0] def __discretize_gh__(self, N=5): # Gauss-Hermite # Maybe we can avoid that one by inheriting from mvNormal from dolo.numeric.discretization.quadrature import gauss_hermite_nodes [x, w] = gauss_hermite_nodes(N, np.array([[self.σ ** 2]]), mu=self.μ) x += np.array([self.μ])[:, None] return FiniteDistribution(x, w, origin=self) @language_element ##@dataclass class Uniform(UnivariateContinuousDistribution): # uniform distribution over an interval [a,b] a: float = 0.0 b: float = 1.0 def __init__(self, a: float = 0.0, b: float = 1.0): self.a = float(a) self.b = float(b) def ppf(self, quantiles: Vector) -> Vector: x = uniform.ppf(quantiles, loc=self.a, scale=(self.b - self.a)) return x def cdf(self, x: Vector) -> Vector: p = uniform.cdf(x, loc=self.a, scale=(self.b - self.a)) return p def draw(self, N) -> Matrix: from numpy.random import uniform sim = uniform(self.a, self.b, N) return sim.reshape((N, 1)) @language_element # @dataclass class LogNormal(UnivariateContinuousDistribution): # parametrization a lognormal random variable Y is in terms of # the mean, μ, and standard deviation, σ, of the unique normally distributed random variable X # such that exp(X) = Y. μ: float = 0.0 σ: float = 1.0 signature = {"μ": "Optional[float]", "σ": "float"} # this is redundant for now @greek_tolerance def __init__(self, σ: float = 0.0, μ: float = 1.0): self.σ = float(σ) self.μ = float(μ) # From scipy: defined as lognorm.pdf(x, s, loc, scale) # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.lognorm.html#scipy.stats.lognorm # A common parametrization for a lognormal random variable Y is in terms of # the mean, mu, and standard deviation, sigma, of the unique normally distributed random variable X # such that exp(X) = Y. # This parametrization corresponds to setting s = sigma and scale = exp(mu). def ppf(self, quantiles): x = lognorm.ppf(quantiles, s=self.σ, loc=0, scale=np.exp(self.μ)) return x # method: See hark # Makes an equiprobable distribution by # default, but user can optionally request augmented tails with exponentially # sized point masses. This can improve solution accuracy in some models. # https://github.com/econ-ark/HARK/blob/d99393973554b1cf830c6285e6da59d98ff242ff/HARK/utilities.py # Returns # inodes # Discrete points for discrete probability mass function. # weights # Probability associated with each point in grid (nodes) class Beta(UnivariateContinuousDistribution): α: float β: float signature = {"α": "float", "β": "float"} def __init__(self, α: float = None, β: float = None): self.α = float(α) self.β = float(β) def ppf(self, quantiles): x = beta.ppf(quantiles, self.α, self.β) return x # method: In hark utilities # reference https://github.com/econ-ark/HARK/blob/d99393973554b1cf830c6285e6da59d98ff242ff/HARK/utilities.py # Returns # inodes ("X" in hark) # Discrete points for discrete probability mass function. # weights ("pmf" in Hark : Discrete points for discrete probability mass function.) # Probability associated with each point in grid (nodes) #% ### ### nd continous distribution ### @language_element class Normal(ContinuousDistribution): Μ: Vector # this is capital case μ, not M... 
😭 Σ: Matrix signature = {"Σ": "Matrix", "Μ": "Optional[Vector]"} @greek_tolerance def __init__(self, Σ=None, Μ=None): Sigma = Σ mu = Μ self.Σ = np.atleast_2d(np.array(Sigma, dtype=float)) self.d = len(self.Σ) if mu is None: self.Μ = np.array([0.0] * self.d) else: self.Μ = np.array(mu, dtype=float) assert self.Σ.shape[0] == self.d assert self.Σ.shape[0] == self.d # this class wraps functionality from scipy import scipy.stats self._dist_ = scipy.stats.multivariate_normal(mean=self.Μ, cov=self.Σ) def draw(self, N: int) -> Matrix: res = self._dist_.rvs(size=N) if res.ndim == 1: # scipy returns a 1d object for 1d distribution res = res[:, None] return res def discretize(self, N=None) -> FiniteDistribution: if N is None: N = 5 if isinstance(N, int): N = [N] * self.d from dolo.numeric.discretization.quadrature import gauss_hermite_nodes # type: ignore [x, w] = gauss_hermite_nodes(N, self.Σ, mu=self.Μ) x = np.row_stack([(e + self.Μ) for e in x]) return FiniteDistribution(x, w, origin=self) def __repr__(self): return f"Normal(Μ={self.Μ.__repr__()},Σ={self.Σ.__repr__()})" def __str__(self): return f"Normal(Μ={self.Μ},Σ={self.Σ})" MvNormal = Normal class ProductDistribution(ContinuousDistribution): distributions: List[Distribution] # def __new__(self, distributions: List[Distributions]): # # if all distributions are normal we can interrupt the object # # construction and return a multivariate normal object instead # # of a product object def __init__(self, distributions: List[Distribution]): self.distributions = distributions self.d = sum([dis.d for dis in distributions]) self.names = sum([dis.names for dis in self.distributions], tuple()) def discretize(self): # TODO: pass some options fids = [dis.discretize() for dis in self.distributions] return product_iid(fids) def draw(self, N: int) -> Matrix: return np.concatenate([dis.draw(N) for dis in self.distributions], axis=1) def product_iid(iids: List[FiniteDistribution]) -> FiniteDistribution: from dolo.numeric.misc import cartesian nn = [len(f.weights) for f in iids] cart = cartesian([range(e) for e in nn]) nodes = np.concatenate( [f.points[cart[:, i], :] for i, f in enumerate(iids)], axis=1 ) weights = iids[0].weights for f in iids[1:]: weights = np.kron(weights, f.weights) return FiniteDistribution(nodes, weights) ### ### Truncation and Mixtures ### C = TypeVar("C", bound=ContinuousDistribution) C1 = TypeVar("C1", bound=ContinuousDistribution) C2 = TypeVar("C2", bound=ContinuousDistribution) class Truncation(UnivariateContinuousDistribution, Generic[C]): dist: C def __init__(self, dist: C, lb=-np.inf, ub=np.inf): self.dist = dist if lb == -np.inf: self.__min_q__ = 0.0 else: self.__min_q__ = self.dist.cdf([lb])[0] if ub == np.inf: self.__max_q__ = 1.0 else: self.__max_q__ = self.dist.cdf([ub])[0] def draw(self, N: int): # TODO: replace this stupid algo raise Exception("Not Implemented") def ppf(self, quantiles: Vector) -> Vector: q_lb = self.__min_q__ q_ub = self.__max_q__ q = q_lb + (q_ub - q_lb) * quantiles return self.dist.ppf(q) @language_element class Mixture(ContinuousDistribution): index: DiscreteDistribution # values must be [0,1,..n] distributions: Tuple[UnivariateContinuousDistribution, ...] 
    # length must be [n]

    signature = {"index": "DiscreteDistribution", "distributions": "List[Distribution]"}

    def __init__(self, index=None, distributions=None):
        # index is a distribution which takes discrete values
        # distributions is a map from each of these values to a distribution
        self.index = index
        self.distributions = distributions
        ds = [e.d for e in self.distributions.values()]
        assert len(set(ds)) == 1
        d0 = [*self.distributions.values()][0]
        self.d = d0.d
        # TODO: check all distributions have the same variable names
        self.names = d0.names

    def discretize(self, to="iid"):
        if to != "iid":
            raise Exception("Not implemented (yet).")
        inddist = self.index.discretize()
        nodes = []
        weights = []
        for i in range(inddist.n_inodes(0)):
            wind = inddist.iweight(0, i)
            xind = inddist.inode(0, i)
            dist = self.distributions[i].discretize()
            for j in range(dist.n_inodes(0)):
                w = dist.iweight(0, j)
                x = dist.inode(0, j)
                nodes.append(x)
                weights.append(wind * w)
        nodes = np.concatenate([e[None, :] for e in nodes], axis=0)
        weights = np.array(weights)
        return FiniteDistribution(nodes, weights)

    def draw(self, N: int) -> Matrix:
        # naive and overkill algorithm
        inds = self.index.draw(N)  # should be (N x 1) array
        return sum(
            [(inds == k) * dist.draw(N) for (k, dist) in self.distributions.items()]
        )


# @language_element
# def Mixture(index=None, distributions=None):
#     for dist in distributions.values():
#         if not (isinstance(dist, IIDProcess)):
#             raise Exception("Only mixtures of iid processes are supported so far.")
#     return IIDMixture(index, distributions)
#     # not clear what we might do with non-iid

# Mixture.signature = {'index': 'intprocess', 'distributions': 'Dict[int,IIDProcesses]'}
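# --- Illustrative usage sketch (not part of the original module) -----------
# Demonstrates the equiprobable discretization documented in
# UnivariateContinuousDistribution.discretize(): N mass points placed at the
# quantile medians, wrapped in an EquiprobableDistribution whose
# items()/integrate() come from DiscreteDistribution.  Only the API defined
# above is used; the numeric values are illustrative guesses, not dolo
# defaults.
if __name__ == "__main__":
    u_dist = UNormal(μ=0.0, σ=0.1)
    disc = u_dist.discretize(N=10, method="equiprobable")
    # Ten equally weighted points on the quantile grid of the normal.
    print(disc.points.shape, disc.weights)        # (10, 1) and ten weights of 0.1
    # Second moment: somewhat below σ² = 0.01, since the quantile-median rule
    # clips the tails of the distribution.
    print(disc.integrate(lambda x: x[0] ** 2))
    # method="gauss-hermite" instead returns a FiniteDistribution whose
    # weights come from Gauss-Hermite quadrature.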
bsd-2-clause
2,187,045,113,317,805,600
27.634615
131
0.606351
false
jonasrothfuss/DeepEpisodicMemory
models/model_zoo/model_conv5_fc128.py
1
9446
import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim from tensorflow.contrib.layers.python import layers as tf_layers from models.conv_lstm import basic_conv_lstm_cell # Amount to use when lower bounding tensors RELU_SHIFT = 1e-12 FC_LAYER_SIZE = 128 # kernel size for DNA and CDNA. DNA_KERN_SIZE = 5 def encoder_model(frames, sequence_length, initializer, scope='encoder', fc_conv_layer=False): """ Args: frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_higth, num_channels) sequence_length: number of frames that shall be encoded scope: tensorflow variable scope name initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data) fc_conv_layer: adds an fc layer at the end of the encoder Returns: hidden4: hidden state of highest ConvLSTM layer fc_conv_layer: indicated whether a Fully Convolutional (8x8x16 -> 1x1x1024) shall be added """ lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5 = None, None, None, None, None for i in range(sequence_length): frame = frames[:,i,:,:,:] reuse = (i > 0) with tf.variable_scope(scope, reuse=reuse): #LAYER 1: conv1 conv1 = slim.layers.conv2d(frame, 16, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm1'}) #LAYER 2: convLSTM1 hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 16, initializer, filter_size=5, scope='convlstm1') hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2') #LAYER 3: conv2 conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm3'}) #LAYER 4: convLSTM2 hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 16, initializer, filter_size=5, scope='convlstm2') hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4') #LAYER 5: conv3 conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [5, 5], stride=2, scope='conv3', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm5'}) #LAYER 6: convLSTM3 hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3') hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6') #LAYER 7: conv4 conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm7'}) #LAYER 8: convLSTM4 (8x8 featuremap size) hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 32, initializer, filter_size=3, scope='convlstm4') hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8') #LAYER 8: conv5 conv5 = slim.layers.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv5', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm9'}) hidden5, lstm_state5 = basic_conv_lstm_cell(conv5, lstm_state5, 32, initializer, filter_size=3, scope='convlstm5') hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm10') #LAYER 9: Fully Convolutional Layer (8x8x16 --> 1x1xFC_LAYER_SIZE) if fc_conv_layer: fc_conv = slim.layers.conv2d(hidden5, FC_LAYER_SIZE, [4,4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer) hidden_repr = fc_conv else: hidden_repr = hidden5 return hidden_repr def 
decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, scope='decoder', fc_conv_layer=False): """ Args: hidden_repr: Tensor of latent space representation sequence_length: number of frames that shall be decoded from the hidden_repr num_channels: number of channels for generated frames initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data) fc_conv_layer: adds an fc layer at the end of the encoder Returns: frame_gen: array of generated frames (Tensors) fc_conv_layer: indicates whether hidden_repr is 1x1xdepth tensor a and fully concolutional layer shall be added """ frame_gen = [] lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5 = None, None, None, None, None assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1) for i in range(sequence_length): reuse = (i > 0) #reuse variables (recurrence) after first time step with tf.variable_scope(scope, reuse=reuse): #Fully Convolutional Layer (1x1xFC_LAYER_SIZE -> 8x8x16) if fc_conv_layer: fc_conv = slim.layers.conv2d_transpose(hidden_repr, 32, [4, 4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer) hidden1_input = fc_conv else: hidden1_input = hidden_repr #LAYER 1: convLSTM1 hidden1, lstm_state1 = basic_conv_lstm_cell(hidden1_input, lstm_state1, 32, initializer, filter_size=3, scope='convlstm1') hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1') #LAYER 2: upconv1 (8x8 -> 16x16) upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1', weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm2'}) #LAYER 3: convLSTM2 hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 32, initializer, filter_size=3, scope='convlstm2') hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3') #LAYER 4: upconv2 (16x16 -> 32x32) upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2', weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm4'}) #LAYER 5: convLSTM3 hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3') hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5') # LAYER 6: upconv3 (32x32 -> 64x64) upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3', weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm6'}) #LAYER 7: convLSTM4 hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 16, initializer, filter_size=5, scope='convlstm4') hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7') #Layer 8: upconv4 (64x64 -> 128x128) upconv4 = slim.layers.conv2d_transpose(hidden4, 16, 5, stride=2, scope='upconv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm8'}) #LAYER 9: convLSTM5 hidden5, lstm_state5 = basic_conv_lstm_cell(upconv4, lstm_state5, 16, initializer, filter_size=5, scope='convlstm5') hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm9') upconv5 = slim.layers.conv2d_transpose(hidden5, num_channels, 5, stride=2, scope='upconv5', weights_initializer=initializer) frame_gen.append(upconv5) assert len(frame_gen)==sequence_length return frame_gen def composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, 
                    uniform_init=True, num_channels=3, fc_conv_layer=True):
  """
  Args:
    frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
    encoder_len: number of frames that shall be encoded
    decoder_future_len: number of future frames that shall be decoded from the hidden_repr
    decoder_reconst_len: number of input frames that shall be reconstructed from the hidden_repr
    uniform_init: specifies if the weight initialization should be drawn from gaussian or uniform distribution (default: uniform)
    num_channels: number of channels for generated frames
    fc_conv_layer: indicates whether a fully convolutional layer shall be added between encoder and decoder
  Returns:
    frames_pred, frames_reconst: lists of generated frames (Tensors) from the prediction and reconstruction decoders
    hidden_repr: latent representation produced by the encoder
  """
  assert all([length > 0 for length in [encoder_len, decoder_future_len, decoder_reconst_len]])
  initializer = tf_layers.xavier_initializer(uniform=uniform_init)
  hidden_repr = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)
  frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels,
                              scope='decoder_pred', fc_conv_layer=fc_conv_layer)
  frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels,
                                 scope='decoder_reconst', fc_conv_layer=fc_conv_layer)
  return frames_pred, frames_reconst, hidden_repr
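# --- Illustrative sketch (not part of the original file) -------------------
# Wires composite_model() up to a placeholder just to document the expected
# tensor shapes; it is never called by the training code.  It assumes the
# TensorFlow 1.x / tf.contrib.slim environment this module targets, 128x128
# RGB frames and a batch size of 8 (assumptions, not values from the repo).
def _toy_composite_graph(batch_size=8, num_frames=10, height=128, width=128):
  frames = tf.placeholder(tf.float32, [batch_size, num_frames, height, width, 3])
  frames_pred, frames_reconst, hidden_repr = composite_model(
      frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5,
      num_channels=3, fc_conv_layer=True)
  # With 128x128 inputs the five stride-2 convolutions shrink the feature map
  # 128 -> 64 -> 32 -> 16 -> 8 -> 4, and the 4x4 VALID fc_conv collapses it to
  # a (batch_size, 1, 1, FC_LAYER_SIZE) latent tensor; each decoder then emits
  # `sequence_length` frames of shape (batch_size, 128, 128, num_channels).
  return frames_pred, frames_reconst, hidden_repr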
mit
-1,958,676,165,614,050,800
50.617486
168
0.672983
false
sherpaman/MolToolPy
bin/interface_anal.py
2
10996
#!/usr/bin/env python import os import numpy as np import scipy.optimize as opt import matplotlib.pyplot as plt import MDAnalysis as MD import MDAnalysis.analysis.distances as dist from argparse import ArgumentParser parser = ArgumentParser( description = 'Perform Binding Sites Analysis') # # INPUT FILES # parser.add_argument("-l","--list_sites",dest="list_sites",action="store",type=str,default=None,help="List of binding sites to analyze Matrix",required=False,metavar="TXT") parser.add_argument("-t","--topol",dest="top",action="store",type=str,default=None,help="Topology Path",required=False,metavar="TOPOL") parser.add_argument("-f","--traj",dest="trj",action="store",type=str,default=None,help="Trajectory Path",required=False,metavar="TRAJ") parser.add_argument("--npz",dest="npz",action="store_true",default=False,help="Toggle use of Numpy files instead of trajectory") # # OUTPUT FILES # parser.add_argument("-o","--out",dest="out",action="store",type=str,default=None,help="Output base name",required=True,metavar="FILENAME") # # OTHER OPTIONS # parser.add_argument("-b","--begin",dest="begin",action="store",type=int,default=0,help="First frame to read") parser.add_argument("-e","--end",dest="end",action="store",type=int,default=-1,help="Last frame to read") parser.add_argument("-s","--skip",dest="skip",action="store",type=int,default=1,help="number of frame to skip", metavar="INTEGER") parser.add_argument("-c","--dist_cutoff",dest="cutoff",action="store",type=float,default=7.0,help="Distance Cutoff") options = parser.parse_args() top = options.top trj = options.trj base_out = options.out b = options.begin e = options.end skip = options.skip l_sites = options.list_sites npz = options.npz dist_cutoff = options.cutoff # Distance Cutoff in Aangstrom smooth = 2 conv = np.ones(smooth) def read_list_sites(file_in): with open(file_in) as f: raw = [] for rl in f.readlines(): if rl[0] != "#": raw.append(rl.split()) list_a = [ i for i in raw if i[1]=="A" ] list_b = [ i for i in raw if i[1]=="B" ] return list_a, list_b def calc_acfs(data): acfs = np.zeros(len(data)) conv = np.ones(2, dtype = np.int32) # compute the ACFs for the chunk of binding sites passed in argument # extract the data for this site and this lipid to avoid dimensionality nightmares red = np.copy(data) # first value is the sum of all single-frame occupancy acfs[0] = float(np.sum(red)) # if the ACF reached 0 already, no need to compute the rest for k in range(1,len(data)): if not acfs[k-1]: acfs[k:] = 0 break # ACF at this point red = np.convolve(red, conv, "valid") == 2 acfs[k] = float(np.sum(red)) # normalization acfs = acfs/acfs[0] return acfs def _exp2(x,l1,l2,a): return a * np.exp(-l1*x) + (1-a) * np.exp(-l2*x) def scan_tau_2(a,n=50): nz = np.where(a==0)[0][0] z_range = np.linspace(10,nz,n).astype(int) l = np.zeros([len(z_range),3]) l0 = a[0] - a[1] for i,z in enumerate(z_range): l[i], o = opt.curve_fit(_exp2,np.arange(z),a[:z],p0=[l0,l0/100.,0.5],bounds=( [0.,0.,0.],[np.inf, np.inf,1.0] )) max_l=np.argmax(1./l[:,0]) #return l[max_l], [z_range ,l] return l[max_l] def opt_tau_2(a,n=50): nz = np.where(a==0)[0][0] z_range = np.linspace(10,nz,n).astype(int) l0 = a[0] - a[1] l, o = opt.curve_fit(_exp2,np.arange(nz),a[:nz],p0=[l0,l0/100.,0.5],bounds=( [0.,0.,0.],[np.inf, np.inf,1.0] )) return l if not npz: l_a , l_b = read_list_sites(l_sites) l_traj_a = np.unique([i[0] for i in l_a ]) l_traj_b = np.unique([i[0] for i in l_b ]) l_traj = list(set(l_traj_a) | set(l_traj_b)) l_traj.sort() l_u = [ 
MD.Universe("{p_top:s}/{n_top:s}.tpr".format(p_top=top,n_top=i),"{p_trj:s}/{n_trj:s}.xtc".format(p_trj=trj,n_trj=i)) for i in l_traj ] text_out = open('{0:s}_ANAL.txt'.format(base_out),'w') # The analysis is performed one trajectory at a time for ntrj,trj in enumerate(l_traj): u = l_u[ntrj] if e == -1: e = len(u.trajectory) # the list of bindibg site to be analyzed for each trajectory is created sub_l_a = [ i for i in l_a if i[0] == trj] sub_l_b = [ i for i in l_b if i[0] == trj] num_a = len(sub_l_a) num_b = len(sub_l_b) s1 = [ [] for i in range(num_a)] s2 = [ [] for j in range(num_b)] name_a = [ '' for i in range(num_a)] name_b = [ '' for j in range(num_b)] # per each couple of binding site "A" - "B" (on the receptor and ligand respectively # the atom selection is stored in a list for n_i,i in enumerate(sub_l_a): n_a = i[2] bs_a = [ int(i[l]) for l in range(3,len(i)) ] str1 = "resnum" for r in bs_a: str1 = "{0:s} {1:d}".format(str1,r) s1[n_i] = u.select_atoms(str1) name_a[n_i]="" for ra in s1[n_i].residues: name_a[n_i] = "{0:s} {1:s}_{2:s}".format(name_a[n_i],ra.resname,str(ra.resid)) for n_j,j in enumerate(sub_l_b): n_b = j[2] bs_b = [ int(j[k]) for k in range(3,len(j)) ] str2 = "resnum" for p in bs_b: str2 = "{0:s} {1:d}".format(str2,p+25) s2[n_j] = u.select_atoms(str2) name_b[n_j]="" for rb in s2[n_j].residues: name_b[n_j] = "{0:s} {1:s}_{2:s}".format(name_b[n_j],rb.resname,str(rb.resid)) print name_a[n_i], name_b[n_j] # A 2-fold nested list is used to store an array of minimal residue-residue distances per each couple "A"-"B" M = [ [ [] for j in np.arange(num_b) ] for i in np.arange(num_a) ] d = np.zeros(2) w = np.zeros(2) o = np.zeros([num_a,num_b,(e-b)/skip]) time = np.zeros([num_a,num_b,(e-b)/skip]) # Per each frame ... for ts in u.trajectory[b:e:skip]: # ... each site in the Receptor (Site A) ... for i in np.arange(num_a): # ... and each site in the Ligand (Site B) ... for j in np.arange(num_b): # ... the minimum distance between each residue in Site A and each residue in Site B is calculated ... M[i][j] = [ [ np.min(dist.distance_array(s_1.positions,s_2.positions,box=u.dimensions)) for s_2 in s2[j].residues] for s_1 in s1[i].residues ] d[0] = np.mean(np.min(M[i][j],axis=0)) # Average minumum distance per each residue in Site A d[1] = np.mean(np.min(M[i][j],axis=1)) # Average minumum distance per each residue in Site B w[0] = np.var(np.min(M[i][j],axis=0)) w[1] = np.var(np.min(M[i][j],axis=1)) time[i,j,(ts.frame-b)/skip-1] = ts.time # ... STD.DEV Weighted Average ... 
o[i,j,(ts.frame-b)/skip-1] = np.average(d,weights=w) text_out.write("TRAJ {0:s} ANAL\n".format(trj)) np.savez('{0:s}_{1:s}.npz'.format(base_out,trj),occupancy=o,time=time,name_s1=name_a,name_s2=name_b) for i in range(num_a): for j in range(num_b): if smooth > 1: running = np.convolve(o[i,j],conv,"valid")/smooth else: running = o[i,j] occ=100.0 * float(np.sum(running<dist_cutoff))/len(running) if occ > 5.0: acsf = calc_acfs(running<dist_cutoff) try: l = opt_tau_2(acsf) except: print ("Error analysis Traj : {0:s} , [{1:s} - {2:s}]".format(trj,name_a[i],name_b[j])) text_out.close() raise dt = u.trajectory[skip].time - u.trajectory[0].time t1 = (1./l[0]) * dt / 1000.0 t2 = (1./l[1]) * dt / 1000.0 p1 = 100*l[2] p2 = 100-p1 text_out.write( "BINDING : [{0:s}]-[{1:s}] {2:6.2f} , t1={3:.2f} ({4:6.2f}), t2={5:.2f} ({6:6.2f})\n".format(name_a[i],name_b[j],occ,t1,p1,t2,p2)) else: text_out.write( "BINDING : [{0:s}]-[{1:s}] {2:6.2f}\n".format(name_a[i],name_b[j],occ)) else: list_npz = [ i for i in os.listdir('./') if ( i[:len(base_out)] == base_out ) & ( i[-3:]=='npz' ) ] list_npz.sort() text_out = open('{0:s}_ANAL.txt'.format(base_out),'w') for trj in list_npz: print("opening : {0:s}\n".format(trj)) text_out.write("{0:s} ANALYSIS\n".format(trj)) data = np.load(trj) o = data['occupancy'] time = data['time'] name_a = data['name_s1'] name_b = data['name_s2'] num_a = len(name_a) num_b = len(name_b) for i in range(num_a): for j in range(num_b): if smooth > 1: running = np.convolve(o[i,j],conv,"valid")/smooth else: running = o[i,j] occ=100.0 * float(np.sum(running<dist_cutoff))/len(running) if occ > 5.0: b = (running<dist_cutoff).astype(int) bind = 1 - np.trim_zeros(1-b) # Remove Bound satates frome the start and end acsf_off = calc_acfs(bind) acsf_on = calc_acfs(1-b) try: l_off = opt_tau_2(acsf_off) l_on = opt_tau_2(acsf_on) except: print ("Error analysis Traj : {0:s} , [{1:s} - {2:s}]".format(trj,name_a[i],name_b[j])) text_out.close() raise dt = time[i,j,1] - time[i,j,0] # t1_on = (1./l_on[0]) * dt / 1000.0 t2_on = (1./l_on[1]) * dt / 1000.0 p1_on = 100*l_on[2] p2_on = 100-p1_on # t1_off = (1./l_off[0]) * dt / 1000.0 t2_off = (1./l_off[1]) * dt / 1000.0 p1_off = 100*l_off[2] p2_off = 100-p1_off text_out.write( " BINDING :[{0:s}]-[{1:s}]:{2:6.2f}\n".format(name_a[i],name_b[j],occ)) text_out.write( " K_on :t1={0:7.2f}({1:6.2f}):t2={2:7.2f}({3:6.2f})\n".format(t1_on ,p1_on ,t2_on ,p2_on )) text_out.write( " K_off :t1={0:7.2f}({1:6.2f}):t2={2:7.2f}({3:6.2f})\n".format(t1_off,p1_off,t2_off,p2_off)) else: text_out.write( " BINDING :[{0:s}]-[{1:s}]:{2:6.2f}\n".format(name_a[i],name_b[j],occ)) text_out.close() quit()
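# --- Illustrative self-test (not part of the original script) --------------
# calc_acfs() counts, for each lag k, how many stretches of k+1 consecutive
# bound frames exist, and opt_tau_2() fits that decay with a double
# exponential to estimate residence times.  The helper below exercises the
# two functions on a synthetic two-state Markov trace with a known mean bound
# lifetime.  It sits after quit(), so it never executes during a normal
# analysis; copy it together with calc_acfs/opt_tau_2 into an interactive
# session to try it.  All parameters are illustrative assumptions only.
def _acf_selftest(n_frames=20000, p_stay_bound=0.95, p_bind=0.2, seed=42):
    rng = np.random.RandomState(seed)
    state = np.zeros(n_frames, dtype=int)
    for t in range(1, n_frames):
        if state[t - 1] == 1:
            state[t] = 1 if rng.rand() < p_stay_bound else 0
        else:
            state[t] = 1 if rng.rand() < p_bind else 0
    acf = calc_acfs(state)
    l1, l2, amp = opt_tau_2(acf)
    # For geometric dwell times the ACF decays roughly like p_stay_bound**k,
    # so the dominant fitted time constant should come out near
    # -1/log(p_stay_bound) ~= 19.5 frames (mean bound lifetime = 20 frames).
    dom = l1 if amp >= 0.5 else l2  # rate carrying most of the fitted weight
    print("dominant tau ~ {0:.1f} frames (weights {1:.2f}/{2:.2f})".format(1.0 / dom, amp, 1 - amp))
    return acf, (l1, l2, amp)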
gpl-2.0
-3,806,128,781,330,948,600
44.251029
171
0.506821
false