Dataset schema: repo_name (string, 6-103 chars), path (string, 5-191 chars), copies (string, 1-4 chars), size (string, 4-6 chars), content (string, 986-970k chars), license (string, 15 classes). Each record below lists these six fields in this order.
giorgiop/scikit-learn
examples/linear_model/plot_logistic_multinomial.py
49
2480
""" ==================================================== Plot multinomial and One-vs-Rest Logistic Regression ==================================================== Plot decision surface of multinomial and One-vs-Rest Logistic Regression. The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers are represented by the dashed lines. """ print(__doc__) # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_blobs from sklearn.linear_model import LogisticRegression # make 3-class dataset for classification centers = [[-5, 0], [0, 1.5], [5, -1]] X, y = make_blobs(n_samples=1000, centers=centers, random_state=40) transformation = [[0.4, 0.2], [-0.4, 1.2]] X = np.dot(X, transformation) for multi_class in ('multinomial', 'ovr'): clf = LogisticRegression(solver='sag', max_iter=100, random_state=42, multi_class=multi_class).fit(X, y) # print the training scores print("training score : %.3f (%s)" % (clf.score(X, y), multi_class)) # create a mesh to plot in h = .02 # step size in the mesh x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.title("Decision surface of LogisticRegression (%s)" % multi_class) plt.axis('tight') # Plot also the training points colors = "bry" for i, color in zip(clf.classes_, colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired) # Plot the three one-against-all classifiers xmin, xmax = plt.xlim() ymin, ymax = plt.ylim() coef = clf.coef_ intercept = clf.intercept_ def plot_hyperplane(c, color): def line(x0): return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1] plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color) for i, color in zip(clf.classes_, colors): plot_hyperplane(i, color) plt.show()
bsd-3-clause
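A side note on the hyperplane plotting in the example above: for each one-vs-rest classifier c, the dashed boundary is the set of points where coef_[c, 0] * x0 + coef_[c, 1] * x1 + intercept_[c] = 0, which is exactly what plot_hyperplane solves for x1. Below is a minimal, standalone sketch (toy blobs; the data and parameter values are chosen here only for illustration) that reads those per-class coefficients:

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

# toy 2-D, 3-class data (illustrative only, not the example's dataset)
X, y = make_blobs(n_samples=300, centers=3, random_state=0)
clf = LogisticRegression(multi_class='ovr', solver='sag', max_iter=200).fit(X, y)

# coef_ has shape (n_classes, n_features); each row (w0, w1), together with
# its intercept b, defines the dashed line w0 * x0 + w1 * x1 + b = 0.
for c, (w, b) in enumerate(zip(clf.coef_, clf.intercept_)):
    print("class %d boundary: %.2f * x0 + %.2f * x1 + %.2f = 0" % (c, w[0], w[1], b))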
PatrickOReilly/scikit-learn
examples/neighbors/plot_digits_kde_sampling.py
106
2026
""" ========================= Kernel Density Estimation ========================= This example shows how kernel density estimation (KDE), a powerful non-parametric density estimation technique, can be used to learn a generative model for a dataset. With this generative model in place, new samples can be drawn. These new samples reflect the underlying model of the data. """ import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_digits from sklearn.neighbors import KernelDensity from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV # load the data digits = load_digits() data = digits.data # project the 64-dimensional data to a lower dimension pca = PCA(n_components=15, whiten=False) data = pca.fit_transform(digits.data) # use grid search cross-validation to optimize the bandwidth params = {'bandwidth': np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(data) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ # sample 44 new points from the data new_data = kde.sample(44, random_state=0) new_data = pca.inverse_transform(new_data) # turn data into a 4x11 grid new_data = new_data.reshape((4, 11, -1)) real_data = digits.data[:44].reshape((4, 11, -1)) # plot real digits and resampled digits fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[])) for j in range(11): ax[4, j].set_visible(False) for i in range(4): im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) ax[0, 5].set_title('Selection from the input data') ax[5, 5].set_title('"New" digits drawn from the kernel density model') plt.show()
bsd-3-clause
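The core of the example above, distilled: cross-validate the KDE bandwidth, then draw new samples from the fitted density. A minimal sketch on made-up 1-D data (assumed here only for illustration, not the digits set):

import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
x = np.concatenate([rng.normal(-2, 1, 200), rng.normal(3, 0.5, 200)])[:, np.newaxis]

# pick the bandwidth by cross-validated log-likelihood, as in the example above
grid = GridSearchCV(KernelDensity(), {'bandwidth': np.logspace(-1, 1, 20)})
grid.fit(x)
kde = grid.best_estimator_
print("best bandwidth: %.3f" % kde.bandwidth)

new_x = kde.sample(5, random_state=0)  # draw new points from the learned density
print(new_x.ravel())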
giorgiop/scikit-learn
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
90
10801
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import ignore_warnings from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV) def test_sparse_coef(): # Check that the sparse_coef property works clf = ElasticNet() clf.coef_ = [1, 2, 3] assert_true(sp.isspmatrix(clf.sparse_coef_)) assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_) def test_normalize_option(): # Check that the normalize option in enet works X = sp.csc_matrix([[-1], [0], [1]]) y = [-1, 0, 1] clf_dense = ElasticNet(fit_intercept=True, normalize=True) clf_sparse = ElasticNet(fit_intercept=True, normalize=True) clf_dense.fit(X, y) X = sp.csc_matrix(X) clf_sparse.fit(X, y) assert_almost_equal(clf_dense.dual_gap_, 0) assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_) def test_lasso_zero(): # Check that the sparse lasso can handle zero data without crashing X = sp.csc_matrix((3, 1)) y = [0, 0, 0] T = np.array([[1], [2], [3]]) clf = Lasso().fit(X, y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy_list_input(): # Test ElasticNet for various values of alpha and l1_ratio with list X X = np.array([[-1], [0], [1]]) X = sp.csc_matrix(X) Y = [-1, 0, 1] # just a straight line T = np.array([[2], [3], [4]]) # test sample # this should be the same as unregularized least squares clf = ElasticNet(alpha=0, l1_ratio=1.0) # catch warning about alpha=0. # this is discouraged but should work. 
ignore_warnings(clf.fit)(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy_explicit_sparse_input(): # Test ElasticNet for various values of alpha and l1_ratio with sparse X f = ignore_warnings # training samples X = sp.lil_matrix((3, 1)) X[0, 0] = -1 # X[1, 0] = 0 X[2, 0] = 1 Y = [-1, 0, 1] # just a straight line (the identity function) # test samples T = sp.lil_matrix((3, 1)) T[0, 0] = 2 T[1, 0] = 3 T[2, 0] = 4 # this should be the same as lasso clf = ElasticNet(alpha=0, l1_ratio=1.0) f(clf.fit)(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42, positive=False, n_targets=1): random_state = np.random.RandomState(seed) # build an ill-posed linear regression problem with many noisy features and # comparatively few samples # generate a ground truth model w = random_state.randn(n_features, n_targets) w[n_informative:] = 0.0 # only the top features are impacting the model if positive: w = np.abs(w) X = random_state.randn(n_samples, n_features) rnd = random_state.uniform(size=(n_samples, n_features)) X[rnd > 0.5] = 0.0 # 50% of zeros in input signal # generate training ground truth labels y = np.dot(X, w) X = sp.csc_matrix(X) if n_targets == 1: y = np.ravel(y) return X, y def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive): n_samples, n_features, max_iter = 100, 100, 1000 n_informative = 10 X, y = make_sparse_data(n_samples, n_features, n_informative, positive=positive) X_train, X_test = X[n_samples // 2:], X[:n_samples // 2] y_train, y_test = y[n_samples // 2:], y[:n_samples // 2] s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept, max_iter=max_iter, tol=1e-7, positive=positive, warm_start=True) s_clf.fit(X_train, y_train) assert_almost_equal(s_clf.dual_gap_, 0, 4) assert_greater(s_clf.score(X_test, y_test), 0.85) # check the convergence is the same as the dense version d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept, max_iter=max_iter, tol=1e-7, positive=positive, warm_start=True) d_clf.fit(X_train.toarray(), y_train) assert_almost_equal(d_clf.dual_gap_, 0, 4) assert_greater(d_clf.score(X_test, y_test), 0.85) assert_almost_equal(s_clf.coef_, d_clf.coef_, 5) assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5) # check that the coefs are sparse 
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative) def test_sparse_enet_not_as_toy_dataset(): _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False, positive=False) _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True, positive=False) _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False, positive=True) _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True, positive=True) def test_sparse_lasso_not_as_toy_dataset(): n_samples = 100 max_iter = 1000 n_informative = 10 X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative) X_train, X_test = X[n_samples // 2:], X[:n_samples // 2] y_train, y_test = y[n_samples // 2:], y[:n_samples // 2] s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) s_clf.fit(X_train, y_train) assert_almost_equal(s_clf.dual_gap_, 0, 4) assert_greater(s_clf.score(X_test, y_test), 0.85) # check the convergence is the same as the dense version d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7) d_clf.fit(X_train.toarray(), y_train) assert_almost_equal(d_clf.dual_gap_, 0, 4) assert_greater(d_clf.score(X_test, y_test), 0.85) # check that the coefs are sparse assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative) def test_enet_multitarget(): n_targets = 3 X, y = make_sparse_data(n_targets=n_targets) estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None) # XXX: There is a bug when precompute is not None! estimator.fit(X, y) coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_, estimator.dual_gap_) for k in range(n_targets): estimator.fit(X, y[:, k]) assert_array_almost_equal(coef[k, :], estimator.coef_) assert_array_almost_equal(intercept[k], estimator.intercept_) assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) def test_path_parameters(): X, y = make_sparse_data() max_iter = 50 n_alphas = 10 clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, fit_intercept=False) ignore_warnings(clf.fit)(X, y) # new params assert_almost_equal(0.5, clf.l1_ratio) assert_equal(n_alphas, clf.n_alphas) assert_equal(n_alphas, len(clf.alphas_)) sparse_mse_path = clf.mse_path_ ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data assert_almost_equal(clf.mse_path_, sparse_mse_path) def test_same_output_sparse_dense_lasso_and_enet_cv(): X, y = make_sparse_data(n_samples=40, n_features=10) for normalize in [True, False]: clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize) ignore_warnings(clfs.fit)(X, y) clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize) ignore_warnings(clfd.fit)(X.toarray(), y) assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) assert_array_almost_equal(clfs.alphas_, clfd.alphas_) clfs = LassoCV(max_iter=100, cv=4, normalize=normalize) ignore_warnings(clfs.fit)(X, y) clfd = LassoCV(max_iter=100, cv=4, normalize=normalize) ignore_warnings(clfd.fit)(X.toarray(), y) assert_almost_equal(clfs.alpha_, clfd.alpha_, 7) assert_almost_equal(clfs.intercept_, clfd.intercept_, 7) assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_) assert_array_almost_equal(clfs.alphas_, clfd.alphas_) def test_same_multiple_output_sparse_dense(): for normalize in [True, False]: l = ElasticNet(normalize=normalize) X = [[0, 1, 2, 3, 4], [0, 2, 5, 8, 11], [9, 10, 11, 12, 13], [10, 11, 12, 13, 14]] y = [[1, 2, 3, 4, 5], [1, 3, 6, 9, 12], [10, 11, 12, 13, 14], [11, 12, 13, 14, 
15]] ignore_warnings(l.fit)(X, y) sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1) predict_dense = l.predict(sample) l_sp = ElasticNet(normalize=normalize) X_sp = sp.coo_matrix(X) ignore_warnings(l_sp.fit)(X_sp, y) sample_sparse = sp.coo_matrix(sample) predict_sparse = l_sp.predict(sample_sparse) assert_array_almost_equal(predict_sparse, predict_dense)
bsd-3-clause
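A compact illustration of the property most of the tests above assert: fitting ElasticNet on a scipy.sparse matrix should give essentially the same coefficients as fitting on the dense equivalent. This is a minimal sketch with synthetic data (values and shapes are hypothetical, not taken from the test file):

import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(42)
X = rng.randn(50, 20)
X[rng.uniform(size=X.shape) > 0.5] = 0.0        # make roughly half the entries zero
y = np.dot(X[:, :3], [1.0, -2.0, 0.5]) + 0.01 * rng.randn(50)

dense_clf = ElasticNet(alpha=0.1, l1_ratio=0.8, tol=1e-7).fit(X, y)
sparse_clf = ElasticNet(alpha=0.1, l1_ratio=0.8, tol=1e-7).fit(sp.csc_matrix(X), y)
print(np.allclose(dense_clf.coef_, sparse_clf.coef_, atol=1e-5))  # expected: True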
theoryno3/scikit-learn
sklearn/__init__.py
153
3014
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.17.dev0' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
bsd-3-clause
PatrickOReilly/scikit-learn
benchmarks/bench_plot_randomized_svd.py
56
17557
""" Benchmarks on the power iterations phase in randomized SVD. We test on various synthetic and real datasets the effect of increasing the number of power iterations in terms of quality of approximation and running time. A number greater than 0 should help with noisy matrices, which are characterized by a slow spectral decay. We test several policy for normalizing the power iterations. Normalization is crucial to avoid numerical issues. The quality of the approximation is measured by the spectral norm discrepancy between the original input matrix and the reconstructed one (by multiplying the randomized_svd's outputs). The spectral norm is always equivalent to the largest singular value of a matrix. (3) justifies this choice. However, one can notice in these experiments that Frobenius and spectral norms behave very similarly in a qualitative sense. Therefore, we suggest to run these benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to compute. The benchmarks follow. (a) plot: time vs norm, varying number of power iterations data: many datasets goal: compare normalization policies and study how the number of power iterations affect time and norm (b) plot: n_iter vs norm, varying rank of data and number of components for randomized_SVD data: low-rank matrices on which we control the rank goal: study whether the rank of the matrix and the number of components extracted by randomized SVD affect "the optimal" number of power iterations (c) plot: time vs norm, varing datasets data: many datasets goal: compare default configurations We compare the following algorithms: - randomized_svd(..., power_iteration_normalizer='none') - randomized_svd(..., power_iteration_normalizer='LU') - randomized_svd(..., power_iteration_normalizer='QR') - randomized_svd(..., power_iteration_normalizer='auto') - fbpca.pca() from https://github.com/facebook/fbpca (if installed) Conclusion ---------- - n_iter=2 appears to be a good default value - power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU gives similar errors to QR but is cheaper. That's what 'auto' implements. References ---------- (1) Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061 (2) A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert (3) An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 
2014 """ # Author: Giorgio Patrini import numpy as np import scipy as sp import matplotlib.pyplot as plt import gc import pickle from time import time from collections import defaultdict import os.path from sklearn.utils import gen_batches from sklearn.utils.validation import check_random_state from sklearn.utils.extmath import randomized_svd from sklearn.datasets.samples_generator import (make_low_rank_matrix, make_sparse_uncorrelated) from sklearn.datasets import (fetch_lfw_people, fetch_mldata, fetch_20newsgroups_vectorized, fetch_olivetti_faces, fetch_rcv1) try: import fbpca fbpca_available = True except ImportError: fbpca_available = False # If this is enabled, tests are much slower and will crash with the large data enable_spectral_norm = False # TODO: compute approximate spectral norms with the power method as in # Estimating the largest eigenvalues by the power and Lanczos methods with # a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on # Matrix Analysis and Applications, 13 (4): 1094-1122, 1992. # This approximation is a very fast estimate of the spectral norm, but depends # on starting random vectors. # Determine when to switch to batch computation for matrix norms, # in case the reconstructed (dense) matrix is too large MAX_MEMORY = np.int(2e9) # The following datasets can be dowloaded manually from: # CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz # SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat CIFAR_FOLDER = "./cifar-10-batches-py/" SVHN_FOLDER = "./SVHN/" datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups', 'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix'] big_sparse_datasets = ['big sparse matrix', 'rcv1'] def unpickle(file_name): with open(file_name, 'rb') as fo: return pickle.load(fo, encoding='latin1')["data"] def handle_missing_dataset(file_folder): if not os.path.isdir(file_folder): print("%s file folder not found. Test skipped." 
% file_folder) return 0 def get_data(dataset_name): print("Getting dataset: %s" % dataset_name) if dataset_name == 'lfw_people': X = fetch_lfw_people().data elif dataset_name == '20newsgroups': X = fetch_20newsgroups_vectorized().data[:, :100000] elif dataset_name == 'olivetti_faces': X = fetch_olivetti_faces().data elif dataset_name == 'rcv1': X = fetch_rcv1().data elif dataset_name == 'CIFAR': if handle_missing_dataset(CIFAR_FOLDER) == "skip": return X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1)) for i in range(5)] X = np.vstack(X1) del X1 elif dataset_name == 'SVHN': if handle_missing_dataset(SVHN_FOLDER) == 0: return X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X'] X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])] X = np.vstack(X2) del X1 del X2 elif dataset_name == 'low rank matrix': X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4), effective_rank=100, tail_strength=.5, random_state=random_state) elif dataset_name == 'uncorrelated matrix': X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000, random_state=random_state) elif dataset_name == 'big sparse matrix': sparsity = np.int(1e6) size = np.int(1e6) small_size = np.int(1e4) data = np.random.normal(0, 1, np.int(sparsity/10)) data = np.repeat(data, 10) row = np.random.uniform(0, small_size, sparsity) col = np.random.uniform(0, small_size, sparsity) X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size)) del data del row del col else: X = fetch_mldata(dataset_name).data return X def plot_time_vs_s(time, norm, point_labels, title): plt.figure() colors = ['g', 'b', 'y'] for i, l in enumerate(sorted(norm.keys())): if l is not "fbpca": plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop()) else: plt.plot(time[l], norm[l], label=l, marker='^', c='red') for label, x, y in zip(point_labels, list(time[l]), list(norm[l])): plt.annotate(label, xy=(x, y), xytext=(0, -20), textcoords='offset points', ha='right', va='bottom') plt.legend(loc="upper right") plt.suptitle(title) plt.ylabel("norm discrepancy") plt.xlabel("running time [s]") def scatter_time_vs_s(time, norm, point_labels, title): plt.figure() size = 100 for i, l in enumerate(sorted(norm.keys())): if l is not "fbpca": plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size) for label, x, y in zip(point_labels, list(time[l]), list(norm[l])): plt.annotate(label, xy=(x, y), xytext=(0, -80), textcoords='offset points', ha='right', arrowprops=dict(arrowstyle="->", connectionstyle="arc3"), va='bottom', size=11, rotation=90) else: plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size) for label, x, y in zip(point_labels, list(time[l]), list(norm[l])): plt.annotate(label, xy=(x, y), xytext=(0, 30), textcoords='offset points', ha='right', arrowprops=dict(arrowstyle="->", connectionstyle="arc3"), va='bottom', size=11, rotation=90) plt.legend(loc="best") plt.suptitle(title) plt.ylabel("norm discrepancy") plt.xlabel("running time [s]") def plot_power_iter_vs_s(power_iter, s, title): plt.figure() for l in sorted(s.keys()): plt.plot(power_iter, s[l], label=l, marker='o') plt.legend(loc="lower right", prop={'size': 10}) plt.suptitle(title) plt.ylabel("norm discrepancy") plt.xlabel("n_iter") def svd_timing(X, n_comps, n_iter, n_oversamples, power_iteration_normalizer='auto', method=None): """ Measure time for decomposition """ print("... 
running SVD ...") if method is not 'fbpca': gc.collect() t0 = time() U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter, power_iteration_normalizer, random_state=random_state, transpose=False) call_time = time() - t0 else: gc.collect() t0 = time() # There is a different convention for l here U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter, l=n_oversamples+n_comps) call_time = time() - t0 return U, mu, V, call_time def norm_diff(A, norm=2, msg=True): """ Compute the norm diff with the original matrix, when randomized SVD is called with *params. norm: 2 => spectral; 'fro' => Frobenius """ if msg: print("... computing %s norm ..." % norm) if norm == 2: # s = sp.linalg.norm(A, ord=2) # slow value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False) else: if sp.sparse.issparse(A): value = sp.sparse.linalg.norm(A, ord=norm) else: value = sp.linalg.norm(A, ord=norm) return value def scalable_frobenius_norm_discrepancy(X, U, s, V): # if the input is not too big, just call scipy if X.shape[0] * X.shape[1] < MAX_MEMORY: A = X - U.dot(np.diag(s).dot(V)) return norm_diff(A, norm='fro') print("... computing fro norm by batches...") batch_size = 1000 Vhat = np.diag(s).dot(V) cum_norm = .0 for batch in gen_batches(X.shape[0], batch_size): M = X[batch, :] - U[batch, :].dot(Vhat) cum_norm += norm_diff(M, norm='fro', msg=False) return np.sqrt(cum_norm) def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps): all_time = defaultdict(list) if enable_spectral_norm: all_spectral = defaultdict(list) X_spectral_norm = norm_diff(X, norm=2, msg=False) all_frobenius = defaultdict(list) X_fro_norm = norm_diff(X, norm='fro', msg=False) for pi in power_iter: for pm in ['none', 'LU', 'QR']: print("n_iter = %d on sklearn - %s" % (pi, pm)) U, s, V, time = svd_timing(X, n_comps, n_iter=pi, power_iteration_normalizer=pm, n_oversamples=n_oversamples) label = "sklearn - %s" % pm all_time[label].append(time) if enable_spectral_norm: A = U.dot(np.diag(s).dot(V)) all_spectral[label].append(norm_diff(X - A, norm=2) / X_spectral_norm) f = scalable_frobenius_norm_discrepancy(X, U, s, V) all_frobenius[label].append(f / X_fro_norm) if fbpca_available: print("n_iter = %d on fbca" % (pi)) U, s, V, time = svd_timing(X, n_comps, n_iter=pi, power_iteration_normalizer=pm, n_oversamples=n_oversamples, method='fbpca') label = "fbpca" all_time[label].append(time) if enable_spectral_norm: A = U.dot(np.diag(s).dot(V)) all_spectral[label].append(norm_diff(X - A, norm=2) / X_spectral_norm) f = scalable_frobenius_norm_discrepancy(X, U, s, V) all_frobenius[label].append(f / X_fro_norm) if enable_spectral_norm: title = "%s: spectral norm diff vs running time" % (dataset_name) plot_time_vs_s(all_time, all_spectral, power_iter, title) title = "%s: Frobenius norm diff vs running time" % (dataset_name) plot_time_vs_s(all_time, all_frobenius, power_iter, title) def bench_b(power_list): n_samples, n_features = 1000, 10000 data_params = {'n_samples': n_samples, 'n_features': n_features, 'tail_strength': .7, 'random_state': random_state} dataset_name = "low rank matrix %d x %d" % (n_samples, n_features) ranks = [10, 50, 100] if enable_spectral_norm: all_spectral = defaultdict(list) all_frobenius = defaultdict(list) for rank in ranks: X = make_low_rank_matrix(effective_rank=rank, **data_params) if enable_spectral_norm: X_spectral_norm = norm_diff(X, norm=2, msg=False) X_fro_norm = norm_diff(X, norm='fro', msg=False) for n_comp in [np.int(rank/2), rank, rank*2]: label = "rank=%d, n_comp=%d" % (rank, n_comp) print(label) for 
pi in power_list: U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2, power_iteration_normalizer='LU') if enable_spectral_norm: A = U.dot(np.diag(s).dot(V)) all_spectral[label].append(norm_diff(X - A, norm=2) / X_spectral_norm) f = scalable_frobenius_norm_discrepancy(X, U, s, V) all_frobenius[label].append(f / X_fro_norm) if enable_spectral_norm: title = "%s: spectral norm diff vs n power iteration" % (dataset_name) plot_power_iter_vs_s(power_iter, all_spectral, title) title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name) plot_power_iter_vs_s(power_iter, all_frobenius, title) def bench_c(datasets, n_comps): all_time = defaultdict(list) if enable_spectral_norm: all_spectral = defaultdict(list) all_frobenius = defaultdict(list) for dataset_name in datasets: X = get_data(dataset_name) if X is None: continue if enable_spectral_norm: X_spectral_norm = norm_diff(X, norm=2, msg=False) X_fro_norm = norm_diff(X, norm='fro', msg=False) n_comps = np.minimum(n_comps, np.min(X.shape)) label = "sklearn" print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label)) U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10, method=label) all_time[label].append(time) if enable_spectral_norm: A = U.dot(np.diag(s).dot(V)) all_spectral[label].append(norm_diff(X - A, norm=2) / X_spectral_norm) f = scalable_frobenius_norm_discrepancy(X, U, s, V) all_frobenius[label].append(f / X_fro_norm) if fbpca_available: label = "fbpca" print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label)) U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2, method=label) all_time[label].append(time) if enable_spectral_norm: A = U.dot(np.diag(s).dot(V)) all_spectral[label].append(norm_diff(X - A, norm=2) / X_spectral_norm) f = scalable_frobenius_norm_discrepancy(X, U, s, V) all_frobenius[label].append(f / X_fro_norm) if len(all_time) == 0: raise ValueError("No tests ran. Aborting.") if enable_spectral_norm: title = "normalized spectral norm diff vs running time" scatter_time_vs_s(all_time, all_spectral, datasets, title) title = "normalized Frobenius norm diff vs running time" scatter_time_vs_s(all_time, all_frobenius, datasets, title) if __name__ == '__main__': random_state = check_random_state(1234) power_iter = np.linspace(0, 6, 7, dtype=int) n_comps = 50 for dataset_name in datasets: X = get_data(dataset_name) if X is None: continue print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" % (dataset_name, X.shape[0], X.shape[1])) bench_a(X, dataset_name, power_iter, n_oversamples=2, n_comps=np.minimum(n_comps, np.min(X.shape))) print(" >>>>>> Benching on simulated low rank matrix with variable rank") bench_b(power_iter) print(" >>>>>> Benching sklearn and fbpca default configurations") bench_c(datasets + big_sparse_datasets, n_comps) plt.show()
bsd-3-clause
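For orientation, the quantity the benchmark above reports is the norm discrepancy between X and its rank-k reconstruction from randomized_svd, as a function of n_iter and the power-iteration normalization policy. A minimal sketch (random matrix and parameter values chosen only for illustration):

import numpy as np
from scipy.linalg import norm
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
X = np.dot(rng.randn(200, 50), rng.randn(50, 100))   # an (at most) rank-50 random test matrix

for n_iter in (0, 2, 5):
    U, s, V = randomized_svd(X, n_components=10, n_iter=n_iter,
                             power_iteration_normalizer='LU', random_state=0)
    approx = np.dot(U, np.dot(np.diag(s), V))
    print("n_iter=%d  relative Frobenius discrepancy: %.4f"
          % (n_iter, norm(X - approx, 'fro') / norm(X, 'fro')))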
PatrickOReilly/scikit-learn
sklearn/metrics/__init__.py
26
3527
""" The :mod:`sklearn.metrics` module includes score functions, performance metrics and pairwise metrics and distance computations. """ from .ranking import auc from .ranking import average_precision_score from .ranking import coverage_error from .ranking import label_ranking_average_precision_score from .ranking import label_ranking_loss from .ranking import precision_recall_curve from .ranking import roc_auc_score from .ranking import roc_curve from .classification import accuracy_score from .classification import classification_report from .classification import cohen_kappa_score from .classification import confusion_matrix from .classification import f1_score from .classification import fbeta_score from .classification import hamming_loss from .classification import hinge_loss from .classification import jaccard_similarity_score from .classification import log_loss from .classification import matthews_corrcoef from .classification import precision_recall_fscore_support from .classification import precision_score from .classification import recall_score from .classification import zero_one_loss from .classification import brier_score_loss from . import cluster from .cluster import adjusted_mutual_info_score from .cluster import adjusted_rand_score from .cluster import completeness_score from .cluster import consensus_score from .cluster import homogeneity_completeness_v_measure from .cluster import homogeneity_score from .cluster import mutual_info_score from .cluster import normalized_mutual_info_score from .cluster import fowlkes_mallows_score from .cluster import silhouette_samples from .cluster import silhouette_score from .cluster import calinski_harabaz_score from .cluster import v_measure_score from .pairwise import euclidean_distances from .pairwise import pairwise_distances from .pairwise import pairwise_distances_argmin from .pairwise import pairwise_distances_argmin_min from .pairwise import pairwise_kernels from .regression import explained_variance_score from .regression import mean_absolute_error from .regression import mean_squared_error from .regression import median_absolute_error from .regression import r2_score from .scorer import make_scorer from .scorer import SCORERS from .scorer import get_scorer __all__ = [ 'accuracy_score', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'auc', 'average_precision_score', 'classification_report', 'cluster', 'completeness_score', 'confusion_matrix', 'consensus_score', 'coverage_error', 'euclidean_distances', 'explained_variance_score', 'f1_score', 'fbeta_score', 'get_scorer', 'hamming_loss', 'hinge_loss', 'homogeneity_completeness_v_measure', 'homogeneity_score', 'jaccard_similarity_score', 'label_ranking_average_precision_score', 'label_ranking_loss', 'log_loss', 'make_scorer', 'matthews_corrcoef', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'mutual_info_score', 'normalized_mutual_info_score', 'pairwise_distances', 'pairwise_distances_argmin', 'pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', 'pairwise_kernels', 'precision_recall_curve', 'precision_recall_fscore_support', 'precision_score', 'r2_score', 'recall_score', 'roc_auc_score', 'roc_curve', 'SCORERS', 'silhouette_samples', 'silhouette_score', 'v_measure_score', 'zero_one_loss', 'brier_score_loss', ]
bsd-3-clause
pianomania/scikit-learn
sklearn/datasets/olivetti_faces.py
18
4837
"""Modified Olivetti faces dataset. The original database was available from http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html The version retrieved here comes in MATLAB format from the personal web page of Sam Roweis: http://www.cs.nyu.edu/~roweis/ There are ten different images of each of 40 distinct subjects. For some subjects, the images were taken at different times, varying the lighting, facial expressions (open / closed eyes, smiling / not smiling) and facial details (glasses / no glasses). All the images were taken against a dark homogeneous background with the subjects in an upright, frontal position (with tolerance for some side movement). The original dataset consisted of 92 x 112, while the Roweis version consists of 64x64 images. """ # Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca> # License: BSD 3 clause from io import BytesIO from os.path import exists from os import makedirs try: # Python 2 import urllib2 urlopen = urllib2.urlopen except ImportError: # Python 3 import urllib.request urlopen = urllib.request.urlopen import numpy as np from scipy.io.matlab import loadmat from .base import get_data_home, Bunch from .base import _pkl_filepath from ..utils import check_random_state from ..externals import joblib DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat" TARGET_FILENAME = "olivetti.pkz" # Grab the module-level docstring to use as a description of the # dataset MODULE_DOCS = __doc__ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0, download_if_missing=True): """Loader for the Olivetti faces data-set from AT&T. Read more in the :ref:`User Guide <olivetti_faces>`. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. shuffle : boolean, optional If True the order of the dataset is shuffled to avoid having images of the same person grouped. download_if_missing : optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. random_state : optional, integer or RandomState object The seed or the random number generator used to shuffle the data. Returns ------- An object with the following attributes: data : numpy array of shape (400, 4096) Each row corresponds to a ravelled face image of original size 64 x 64 pixels. images : numpy array of shape (400, 64, 64) Each row is a face image corresponding to one of the 40 subjects of the dataset. target : numpy array of shape (400, ) Labels associated to each face image. Those labels are ranging from 0-39 and correspond to the Subject IDs. DESCR : string Description of the modified Olivetti Faces Dataset. Notes ------ This dataset consists of 10 pictures each of 40 individuals. 
The original database was available from (now defunct) http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html The version retrieved here comes in MATLAB format from the personal web page of Sam Roweis: http://www.cs.nyu.edu/~roweis/ """ data_home = get_data_home(data_home=data_home) if not exists(data_home): makedirs(data_home) filepath = _pkl_filepath(data_home, TARGET_FILENAME) if not exists(filepath): if not download_if_missing: raise IOError("Data not found and `download_if_missing` is False") print('downloading Olivetti faces from %s to %s' % (DATA_URL, data_home)) fhandle = urlopen(DATA_URL) buf = BytesIO(fhandle.read()) mfile = loadmat(buf) faces = mfile['faces'].T.copy() joblib.dump(faces, filepath, compress=6) del mfile else: faces = joblib.load(filepath) # We want floating point data, but float32 is enough (there is only # one byte of precision in the original uint8s anyway) faces = np.float32(faces) faces = faces - faces.min() faces /= faces.max() faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) # 10 images per class, 400 images total, each class is contiguous. target = np.array([i // 10 for i in range(400)]) if shuffle: random_state = check_random_state(random_state) order = random_state.permutation(len(faces)) faces = faces[order] target = target[order] return Bunch(data=faces.reshape(len(faces), -1), images=faces, target=target, DESCR=MODULE_DOCS)
bsd-3-clause
oduwa/Pic-Numero
PicNumero/glcm.py
2
7508
import numpy as np from scipy import misc from skimage.color import rgb2gray from skimage.feature import greycomatrix, greycoprops import Display import Helper import matplotlib.pyplot as plt from skimage import data from skimage import img_as_ubyte from sklearn import linear_model PATCH_SIZE = 50 FEATURE_SIZE = 4 COUNTS = [65*22, 80*22, 87*22, 40*22, 71*22, 58*22, 77*22, 68*22, 67*22, 61*22, 46*22, 57*22] # started with 1 and 2 LIN_REGRESSION_MODEL_NAME = "../Models/regression.data" def train(): ''' Builds linear regression from wheat images using GLCM properties. Returns: linear regression model ''' if(Helper.unserialize(LIN_REGRESSION_MODEL_NAME) == None): numberOfImages = 12; # TODO: AUTOMATICALLY GET NUMBER OF IMAGES # Get number of images. Remeber to divide by 2 as for every relevant image, # theres also the comparison image. # if ".DS_Store" in os.listdir("Wheat_ROIs"): # numberOfImages = (len(os.listdir("Wheat_ROIs")) - 1)/2; # else: # numberOfImages = len(os.listdir("Wheat_ROIs"))/2; featureList = np.zeros((numberOfImages, FEATURE_SIZE)) # For each ROI image in folder for i in range(1, numberOfImages+1): # Load image filename = "../Wheat_Images/{:03d}.jpg".format(i); img = misc.imread(filename); img_gray = img_as_ubyte(rgb2gray(img)); glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True) dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0] correlation = greycoprops(glcm, 'correlation')[0, 0] homogeneity = greycoprops(glcm, 'homogeneity')[0, 0] energy = greycoprops(glcm, 'energy')[0, 0] feature = np.array([dissimilarity, correlation, homogeneity, energy]) featureList[i-1] = feature #print("{} = {}A + {}B + {}C + {}D".format(filename, dissimilarity, correlation, homogeneity, energy)) #print(feature) # Build regression model regression_model = linear_model.LinearRegression() regression_model.fit(featureList, COUNTS[:numberOfImages]) Helper.serialize(LIN_REGRESSION_MODEL_NAME, regression_model) print("COEFF: {}\nINTERCEPT: {}".format(regression_model.coef_, regression_model.intercept_)) print("SCORE: {}".format(regression_model.score(featureList, COUNTS[:numberOfImages]))) return regression_model def count(filename, model): ''' Returns an estimate of the number of grains in a given wheat image. Args: filename: Name of image file containing grains to be counted. model: regression model for estimating count Returns: estimation of the number of grains in image. 
''' img = misc.imread(filename); img_gray = img_as_ubyte(rgb2gray(img)); glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True) dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0] correlation = greycoprops(glcm, 'correlation')[0, 0] homogeneity = greycoprops(glcm, 'homogeneity')[0, 0] energy = greycoprops(glcm, 'energy')[0, 0] feature = np.array([dissimilarity, correlation, homogeneity, energy]) count = model.predict(feature) return count def plot_glcm_properties(): '''Plots different GLCM properties of different areas of the wheat image in order to visualise how the GLCM can split/discriminate between them.''' image = img_as_ubyte(rgb2gray(misc.imread("../Assets/wheat.png"))); #Display.show_image(image, isGray=True) # select some patches from different areas of the image spikelet_locations = [(643, 517), (877, 574), (2129, 649), (1342, 454)] spikelet_patches = [] stalk_locations = [(534, 1056), (1017, 857), (1711, 1365), (2199, 1093)] stalk_patches = [] # Extract patches for loc in spikelet_locations: spikelet_patches.append(image[loc[1]:loc[1] + PATCH_SIZE, loc[0]:loc[0] + PATCH_SIZE]) for loc in stalk_locations: stalk_patches.append(image[loc[1]:loc[1] + PATCH_SIZE, loc[0]:loc[0] + PATCH_SIZE]) # compute some GLCM properties each patch xs = [] ys = [] for patch in (spikelet_patches + stalk_patches): glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True) xs.append(greycoprops(glcm, 'correlation')[0, 0]) ys.append(greycoprops(glcm, 'dissimilarity')[0, 0]) print("({}, {})".format(greycoprops(glcm, 'dissimilarity')[0, 0], greycoprops(glcm, 'correlation')[0, 0])) # create the figure fig = plt.figure(figsize=(8, 8)) # display original image with locations of patches ax = fig.add_subplot(3, 2, 1) ax.imshow(image, cmap=plt.cm.gray, interpolation='nearest', ) for (x,y) in spikelet_locations: ax.plot(x, y, 'gs', markersize=PATCH_SIZE/4) for (x,y) in stalk_locations: ax.plot(x, y, 'bs', markersize=PATCH_SIZE/4) ax.set_xlabel('Original Image') ax.set_xticks([]) ax.set_yticks([]) ax.axis('image') # for each patch, plot (dissimilarity, correlation) ax = fig.add_subplot(3, 2, 2) ax.plot(xs[:len(spikelet_patches)], ys[:len(spikelet_patches)], 'go', label='Grains') ax.plot(xs[len(stalk_patches):], ys[len(stalk_patches):], 'bo', label='Stalk') ax.set_xlabel('GLCM Correlation') ax.set_ylabel('GLVM Dissimilarity') ax.legend() # display the image patches for i, patch in enumerate(spikelet_patches): ax = fig.add_subplot(3, len(spikelet_patches), len(spikelet_patches)*1 + i + 1) ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest', ) ax.set_xlabel('Grains %d' % (i + 1)) for i, patch in enumerate(stalk_patches): ax = fig.add_subplot(3, len(stalk_patches), len(stalk_patches)*2 + i + 1) ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest', ) ax.set_xlabel('Stalk %d' % (i + 1)) plt.show() def plot_residuals(): numberOfImages = 12 residuals = [] featureList = np.zeros((numberOfImages, FEATURE_SIZE)) model = train() # Get feautures for i in range(1, numberOfImages): # Load image filename = "../Wheat_Images/{:03d}.jpg".format(i); img = misc.imread(filename); img_gray = img_as_ubyte(rgb2gray(img)); glcm = greycomatrix(img_gray, [5], [0], 256, symmetric=True, normed=True) dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0] correlation = greycoprops(glcm, 'correlation')[0, 0] homogeneity = greycoprops(glcm, 'homogeneity')[0, 0] energy = greycoprops(glcm, 'energy')[0, 0] feature = np.array([dissimilarity, correlation, homogeneity, energy]) featureList[i-1] = 
feature # Apply model to data predictions = model.predict(featureList) # Compute residuals for i in range(len(predictions)): e = predictions[i] - COUNTS[i] residuals.append(e) # Plot residual graph plt.figure(1) plt.scatter(predictions, residuals, color='blue') plt.axhline(0, color='black') plt.xlabel('Predictions') plt.ylabel('Residuals') # Plot accuracy graph (ie predicted vs actual) plt.figure(2) plt.scatter(predictions, COUNTS, color='blue') plt.plot(range(-500, 2500, 250), range(-500, 2500, 250), color='black', linestyle='dotted') plt.xlim(xmin=0) plt.ylim(ymin=0) plt.xlabel('Predicted') plt.ylabel('Actual') plt.show() #plot_residuals()
mit
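As context for the file above: the regression model is fit on a four-number GLCM feature vector per image (dissimilarity, correlation, homogeneity, energy). A minimal sketch of computing that feature vector with scikit-image on a toy grayscale image (random data, for illustration only):

import numpy as np
from skimage.feature import greycomatrix, greycoprops

img = (np.random.RandomState(0).rand(64, 64) * 255).astype(np.uint8)
glcm = greycomatrix(img, distances=[5], angles=[0], levels=256,
                    symmetric=True, normed=True)
feature = np.array([greycoprops(glcm, prop)[0, 0]
                    for prop in ('dissimilarity', 'correlation', 'homogeneity', 'energy')])
print(feature)  # the 4-dimensional input the LinearRegression above is trained on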
D-K-E/cltk
src/cltk/phonology/syllabifier_processes.py
4
7846
"""This module implements syllabification processes for several languages. You may extend **SyllabificationProcess** and see pre-defined examples. """ from copy import deepcopy from dataclasses import dataclass from boltons.cacheutils import cachedproperty from cltk.core.data_types import Doc, Process from cltk.phonology.ang.phonology import OldEnglishSyllabifier from cltk.phonology.enm.phonology import MiddleEnglishSyllabifier from cltk.phonology.gmh.phonology import MiddleHighGermanSyllabifier from cltk.phonology.lat.phonology import LatinSyllabifier from cltk.phonology.non.phonology import OldNorseSyllabifier @dataclass class SyllabificationProcess(Process): """This is the class to extend if you want to code your own syllabification process in the CLTK-style. """ def run(self, input_doc: Doc) -> Doc: syllabifier = self.algorithm output_doc = deepcopy(input_doc) for word in output_doc.words: word.syllables = syllabifier(word.string.lower()) return output_doc class GreekSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Ancient Greek. >>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import GreekTokenizationProcess >>> from cltk.text.processes import DefaultPunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk import NLP >>> a_pipeline = Pipeline(description="A custom Greek pipeline", processes=[GreekTokenizationProcess, DefaultPunctuationRemovalProcess, GreekSyllabificationProcess], language=get_lang("grc")) >>> nlp = NLP(language='grc', custom_pipeline=a_pipeline, suppress_banner=True) >>> text = get_example_text("grc") >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['ὅτι'], ['μὲν'], ['ὑμ', 'εῖς'], ['ὦ'], ['ἄν', 'δρ', 'ες']] """ description = "The default Latin Syllabification process" @cachedproperty def algorithm(self): return LatinSyllabifier() class LatinSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Latin. >>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import LatinTokenizationProcess >>> from cltk.text.processes import DefaultPunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk import NLP >>> a_pipeline = Pipeline(description="A custom Latin pipeline", processes=[LatinTokenizationProcess, DefaultPunctuationRemovalProcess, LatinSyllabificationProcess], language=get_lang("lat")) >>> nlp = NLP(language='lat', custom_pipeline=a_pipeline, suppress_banner=True) >>> text = get_example_text("lat") >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['gal', 'li', 'a'], ['est'], ['om', 'nis'], ['di', 'vi', 'sa'], ['in']] """ description = "The default Latin Syllabification process" @cachedproperty def algorithm(self): return LatinSyllabifier() class MiddleEnglishSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Middle English. 
>>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import MiddleEnglishTokenizationProcess >>> from cltk.text.processes import DefaultPunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk.nlp import NLP >>> pipe = Pipeline(description="A custom Middle English pipeline", \ processes=[MiddleEnglishTokenizationProcess, DefaultPunctuationRemovalProcess, MiddleEnglishSyllabificationProcess], \ language=get_lang("enm")) >>> nlp = NLP(language='enm', custom_pipeline=pipe, suppress_banner=True) >>> text = get_example_text("enm").replace('\\n', ' ') >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['whi', 'lom'], ['as'], ['ol', 'de'], ['sto', 'ries'], ['tellen']] """ description = "The default Middle English Syllabification process" @cachedproperty def algorithm(self): return MiddleEnglishSyllabifier() class MiddleHighGermanSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Middle High German. >>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import MiddleHighGermanTokenizationProcess >>> from cltk.text.processes import DefaultPunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk.nlp import NLP >>> pipe = Pipeline(description="A custom Middle High German pipeline", \ processes=[MiddleHighGermanTokenizationProcess, DefaultPunctuationRemovalProcess, \ MiddleHighGermanSyllabificationProcess], language=get_lang("gmh")) >>> nlp = NLP(language='gmh', custom_pipeline=pipe, suppress_banner=True) >>> text = get_example_text("gmh") >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['uns'], ['ist'], ['in'], ['al', 'ten'], ['mæ', 'ren']] """ description = "The default Middle High German syllabification process" @cachedproperty def algorithm(self): return MiddleHighGermanSyllabifier() class OldEnglishSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Old English. >>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import MiddleEnglishTokenizationProcess >>> from cltk.text.processes import DefaultPunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk.nlp import NLP >>> pipe = Pipeline(description="A custom Old English pipeline", \ processes=[MiddleEnglishTokenizationProcess, DefaultPunctuationRemovalProcess, OldEnglishSyllabificationProcess], \ language=get_lang("ang")) >>> nlp = NLP(language='ang', custom_pipeline=pipe, suppress_banner=True) >>> text = get_example_text("ang") >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['hwæt'], ['we'], ['gar', 'den', 'a'], ['in'], ['gear', 'da', 'gum']] """ description = "The default Old English syllabification process" @cachedproperty def algorithm(self): return OldEnglishSyllabifier() class OldNorseSyllabificationProcess(SyllabificationProcess): """Syllabification ``Process`` for Old Norse. 
>>> from cltk.core.data_types import Process, Pipeline >>> from cltk.tokenizers.processes import OldNorseTokenizationProcess >>> from cltk.text.processes import OldNorsePunctuationRemovalProcess >>> from cltk.languages.utils import get_lang >>> from cltk.languages.example_texts import get_example_text >>> from cltk.nlp import NLP >>> pipe = Pipeline(description="A custom Old Norse pipeline", \ processes=[OldNorseTokenizationProcess, OldNorsePunctuationRemovalProcess, OldNorseSyllabificationProcess], \ language=get_lang("non")) >>> nlp = NLP(language='non', custom_pipeline=pipe, suppress_banner=True) >>> text = get_example_text("non") >>> cltk_doc = nlp(text) >>> [word.syllables for word in cltk_doc.words[:5]] [['gyl', 'fi'], ['ko', 'nungr'], ['réð'], ['þar'], ['lön', 'dum']] """ description = "The default Old Norse syllabification process" @cachedproperty def algorithm(self): return OldNorseSyllabifier()
mit
giorgiop/scikit-learn
sklearn/utils/sparsetools/tests/test_traversal.py
38
2018
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest

try:
    from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
        csgraph_to_dense, csgraph_from_dense
except ImportError:
    # Oldish versions of scipy don't have that
    csgraph_from_dense = None


def test_graph_breadth_first():
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    csgraph = np.array([[0, 1, 2, 0, 0],
                        [1, 0, 0, 0, 3],
                        [2, 0, 0, 7, 0],
                        [0, 0, 7, 0, 1],
                        [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0, 1, 2, 0, 0],
                       [0, 0, 0, 0, 3],
                       [0, 0, 0, 7, 0],
                       [0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0]])
    for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test),
                                  bfirst)


def test_graph_depth_first():
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    csgraph = np.array([[0, 1, 2, 0, 0],
                        [1, 0, 0, 0, 3],
                        [2, 0, 0, 7, 0],
                        [0, 0, 7, 0, 1],
                        [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    dfirst = np.array([[0, 1, 0, 0, 0],
                       [0, 0, 0, 0, 3],
                       [0, 0, 0, 0, 0],
                       [0, 0, 7, 0, 0],
                       [0, 0, 0, 1, 0]])
    for directed in [True, False]:
        dfirst_test = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(dfirst_test),
                                  dfirst)
bsd-3-clause
yyjiang/scikit-learn
benchmarks/bench_plot_lasso_path.py
299
4003
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) #ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) #ax.legend() i += 1 plt.show()
bsd-3-clause
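The benchmark above times lars_path (LARS) against lasso_path (coordinate descent) on large synthetic problems; a small sketch of the same comparison, assuming a recent scikit-learn where make_regression is importable from sklearn.datasets (the samples_generator path used in the script is the older location), is:

# A small, hedged sketch of the two path solvers the benchmark compares.
import numpy as np
from time import time
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lasso_path

X, y = make_regression(n_samples=200, n_features=50, n_informative=5, random_state=0)

t0 = time()
alphas_lars, _, coefs_lars = lars_path(X, y, method='lasso')
print("lars_path:  %.3fs, %d kinks on the path" % (time() - t0, len(alphas_lars)))

t0 = time()
alphas_cd, coefs_cd, _ = lasso_path(X, y)
print("lasso_path: %.3fs, %d regularization values" % (time() - t0, len(alphas_cd)))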
giorgiop/scikit-learn
sklearn/utils/multiclass.py
41
14732
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi # # License: BSD 3 clause """ Multi-class / multi-label utility function ========================================== """ from __future__ import division from collections import Sequence from itertools import chain from scipy.sparse import issparse from scipy.sparse.base import spmatrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix import numpy as np from ..externals.six import string_types from .validation import check_array from ..utils.fixes import bincount from ..utils.fixes import array_equal def _unique_multiclass(y): if hasattr(y, '__array__'): return np.unique(np.asarray(y)) else: return set(y) def _unique_indicator(y): return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1]) _FN_UNIQUE_LABELS = { 'binary': _unique_multiclass, 'multiclass': _unique_multiclass, 'multilabel-indicator': _unique_indicator, } def unique_labels(*ys): """Extract an ordered array of unique labels We don't allow: - mix of multilabel and multiclass (single label) targets - mix of label indicator matrix and anything else, because there are no explicit labels) - mix of label indicator matrices of different sizes - mix of string and integer labels At the moment, we also don't allow "multiclass-multioutput" input type. Parameters ---------- *ys : array-likes, Returns ------- out : numpy array of shape [n_unique_labels] An ordered array of unique labels. Examples -------- >>> from sklearn.utils.multiclass import unique_labels >>> unique_labels([3, 5, 5, 5, 7, 7]) array([3, 5, 7]) >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4]) array([1, 2, 3, 4]) >>> unique_labels([1, 2, 10], [5, 11]) array([ 1, 2, 5, 10, 11]) """ if not ys: raise ValueError('No argument has been passed.') # Check that we don't mix label format ys_types = set(type_of_target(x) for x in ys) if ys_types == set(["binary", "multiclass"]): ys_types = set(["multiclass"]) if len(ys_types) > 1: raise ValueError("Mix type of y not allowed, got types %s" % ys_types) label_type = ys_types.pop() # Check consistency for the indicator format if (label_type == "multilabel-indicator" and len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1): raise ValueError("Multi-label binary indicator input with " "different numbers of labels") # Get the unique set of labels _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None) if not _unique_labels: raise ValueError("Unknown label type: %s" % repr(ys)) ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys)) # Check that we don't mix string type with number type if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1): raise ValueError("Mix of label input types (string and number)") return np.array(sorted(ys_labels)) def _is_integral_float(y): return y.dtype.kind == 'f' and np.all(y.astype(int) == y) def is_multilabel(y): """ Check if ``y`` is in a multilabel format. Parameters ---------- y : numpy array of shape [n_samples] Target values. Returns ------- out : bool, Return ``True``, if ``y`` is in a multilabel format, else ```False``. 
Examples -------- >>> import numpy as np >>> from sklearn.utils.multiclass import is_multilabel >>> is_multilabel([0, 1, 0, 1]) False >>> is_multilabel([[1], [0, 2], []]) False >>> is_multilabel(np.array([[1, 0], [0, 0]])) True >>> is_multilabel(np.array([[1], [0], [0]])) False >>> is_multilabel(np.array([[1, 0, 0]])) True """ if hasattr(y, '__array__'): y = np.asarray(y) if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1): return False if issparse(y): if isinstance(y, (dok_matrix, lil_matrix)): y = y.tocsr() return (len(y.data) == 0 or np.unique(y.data).size == 1 and (y.dtype.kind in 'biu' or # bool, int, uint _is_integral_float(np.unique(y.data)))) else: labels = np.unique(y) return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint _is_integral_float(labels)) def check_classification_targets(y): """Ensure that target y is of a non-regression type. Only the following target types (as defined in type_of_target) are allowed: 'binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences' Parameters ---------- y : array-like """ y_type = type_of_target(y) if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', 'multilabel-indicator', 'multilabel-sequences']: raise ValueError("Unknown label type: %r" % y_type) def type_of_target(y): """Determine the type of data indicated by target `y` Parameters ---------- y : array-like Returns ------- target_type : string One of: * 'continuous': `y` is an array-like of floats that are not all integers, and is 1d or a column vector. * 'continuous-multioutput': `y` is a 2d array of floats that are not all integers, and both dimensions are of size > 1. * 'binary': `y` contains <= 2 discrete values and is 1d or a column vector. * 'multiclass': `y` contains more than two discrete values, is not a sequence of sequences, and is 1d or a column vector. * 'multiclass-multioutput': `y` is a 2d array that contains more than two discrete values, is not a sequence of sequences, and both dimensions are of size > 1. * 'multilabel-indicator': `y` is a label indicator matrix, an array of two dimensions with at least two columns, and at most 2 unique values. * 'unknown': `y` is array-like but none of the above, such as a 3d array, sequence of sequences, or an array of non-sequence objects. 
Examples -------- >>> import numpy as np >>> type_of_target([0.1, 0.6]) 'continuous' >>> type_of_target([1, -1, -1, 1]) 'binary' >>> type_of_target(['a', 'b', 'a']) 'binary' >>> type_of_target([1.0, 2.0]) 'binary' >>> type_of_target([1, 0, 2]) 'multiclass' >>> type_of_target([1.0, 0.0, 3.0]) 'multiclass' >>> type_of_target(['a', 'b', 'c']) 'multiclass' >>> type_of_target(np.array([[1, 2], [3, 1]])) 'multiclass-multioutput' >>> type_of_target([[1, 2]]) 'multiclass-multioutput' >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]])) 'continuous-multioutput' >>> type_of_target(np.array([[0, 1], [1, 1]])) 'multilabel-indicator' """ valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__')) and not isinstance(y, string_types)) if not valid: raise ValueError('Expected array-like (array or non-string sequence), ' 'got %r' % y) if is_multilabel(y): return 'multilabel-indicator' try: y = np.asarray(y) except ValueError: # Known to fail in numpy 1.3 for array of arrays return 'unknown' # The old sequence of sequences format try: if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence) and not isinstance(y[0], string_types)): raise ValueError('You appear to be using a legacy multi-label data' ' representation. Sequence of sequences are no' ' longer supported; use a binary array or sparse' ' matrix instead.') except IndexError: pass # Invalid inputs if y.ndim > 2 or (y.dtype == object and len(y) and not isinstance(y.flat[0], string_types)): return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"] if y.ndim == 2 and y.shape[1] == 0: return 'unknown' # [[]] if y.ndim == 2 and y.shape[1] > 1: suffix = "-multioutput" # [[1, 2], [1, 2]] else: suffix = "" # [1, 2, 3] or [[1], [2], [3]] # check float and contains non-integer float values if y.dtype.kind == 'f' and np.any(y != y.astype(int)): # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.] return 'continuous' + suffix if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1): return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]] else: return 'binary' # [1, 2] or [["a"], ["b"]] def _check_partial_fit_first_call(clf, classes=None): """Private helper function for factorizing common classes param logic Estimators that implement the ``partial_fit`` API need to be provided with the list of possible classes at the first call to partial_fit. Subsequent calls to partial_fit should check that ``classes`` is still consistent with a previous value of ``clf.classes_`` when provided. This function returns True if it detects that this was the first call to ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also set on ``clf``. """ if getattr(clf, 'classes_', None) is None and classes is None: raise ValueError("classes must be passed on the first call " "to partial_fit.") elif classes is not None: if getattr(clf, 'classes_', None) is not None: if not array_equal(clf.classes_, unique_labels(classes)): raise ValueError( "`classes=%r` is not the same as on last call " "to partial_fit, was: %r" % (classes, clf.classes_)) else: # This is the first call to partial_fit clf.classes_ = unique_labels(classes) return True # classes is None and clf.classes_ has already previously been set: # nothing to do return False def class_distribution(y, sample_weight=None): """Compute class priors from multioutput-multiclass target data Parameters ---------- y : array like or sparse matrix of size (n_samples, n_outputs) The labels for each example. 
sample_weight : array-like of shape = (n_samples,), optional Sample weights. Returns ------- classes : list of size n_outputs of arrays of size (n_classes,) List of classes for each column. n_classes : list of integers of size n_outputs Number of classes in each column class_prior : list of size n_outputs of arrays of size (n_classes,) Class distribution of each column. """ classes = [] n_classes = [] class_prior = [] n_samples, n_outputs = y.shape if issparse(y): y = y.tocsc() y_nnz = np.diff(y.indptr) for k in range(n_outputs): col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]] # separate sample weights for zero and non-zero elements if sample_weight is not None: nz_samp_weight = np.asarray(sample_weight)[col_nonzero] zeros_samp_weight_sum = (np.sum(sample_weight) - np.sum(nz_samp_weight)) else: nz_samp_weight = None zeros_samp_weight_sum = y.shape[0] - y_nnz[k] classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]], return_inverse=True) class_prior_k = bincount(y_k, weights=nz_samp_weight) # An explicit zero was found, combine its weight with the weight # of the implicit zeros if 0 in classes_k: class_prior_k[classes_k == 0] += zeros_samp_weight_sum # If an there is an implicit zero and it is not in classes and # class_prior, make an entry for it if 0 not in classes_k and y_nnz[k] < y.shape[0]: classes_k = np.insert(classes_k, 0, 0) class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior.append(class_prior_k / class_prior_k.sum()) else: for k in range(n_outputs): classes_k, y_k = np.unique(y[:, k], return_inverse=True) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior_k = bincount(y_k, weights=sample_weight) class_prior.append(class_prior_k / class_prior_k.sum()) return (classes, n_classes, class_prior) def _ovr_decision_function(predictions, confidences, n_classes): """Compute a continuous, tie-breaking ovr decision function. It is important to include a continuous value, not only votes, to make computing AUC or calibration meaningful. Parameters ---------- predictions : array-like, shape (n_samples, n_classifiers) Predicted classes for each binary classifier. confidences : array-like, shape (n_samples, n_classifiers) Decision functions or predicted probabilities for positive class for each binary classifier. n_classes : int Number of classes. n_classifiers must be ``n_classes * (n_classes - 1 ) / 2`` """ n_samples = predictions.shape[0] votes = np.zeros((n_samples, n_classes)) sum_of_confidences = np.zeros((n_samples, n_classes)) k = 0 for i in range(n_classes): for j in range(i + 1, n_classes): sum_of_confidences[:, i] -= confidences[:, k] sum_of_confidences[:, j] += confidences[:, k] votes[predictions[:, k] == 0, i] += 1 votes[predictions[:, k] == 1, j] += 1 k += 1 max_confidences = sum_of_confidences.max() min_confidences = sum_of_confidences.min() if max_confidences == min_confidences: return votes # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes. # The motivation is to use confidence levels as a way to break ties in # the votes without switching any decision made based on a difference # of 1 vote. eps = np.finfo(sum_of_confidences.dtype).eps max_abs_confidence = max(abs(max_confidences), abs(min_confidences)) scale = (0.5 - eps) / max_abs_confidence return votes + sum_of_confidences * scale
bsd-3-clause
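The target-inspection helpers defined in multiclass.py above can be exercised directly; a quick sketch that mirrors the docstring examples, assuming scikit-learn is installed, is:

# Quick sketch of the public helpers documented above
# (importable from sklearn.utils.multiclass).
import numpy as np
from sklearn.utils.multiclass import is_multilabel, type_of_target, unique_labels

print(type_of_target([1, 0, 2]))                   # 'multiclass'
print(type_of_target([0.1, 0.6]))                  # 'continuous'
print(is_multilabel(np.array([[1, 0], [0, 1]])))   # True: 2-d binary indicator matrix
print(unique_labels([1, 2, 10], [5, 11]))          # array([ 1,  2,  5, 10, 11])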
astocko/statsmodels
statsmodels/datasets/elnino/data.py
25
1779
"""El Nino dataset, 1950 - 2010""" __docformat__ = 'restructuredtext' COPYRIGHT = """This data is in the public domain.""" TITLE = """El Nino - Sea Surface Temperatures""" SOURCE = """ National Oceanic and Atmospheric Administration's National Weather Service ERSST.V3B dataset, Nino 1+2 http://www.cpc.ncep.noaa.gov/data/indices/ """ DESCRSHORT = """Averaged monthly sea surface temperature - Pacific Ocean.""" DESCRLONG = """This data contains the averaged monthly sea surface temperature in degrees Celcius of the Pacific Ocean, between 0-10 degrees South and 90-80 degrees West, from 1950 to 2010. This dataset was obtained from NOAA. """ NOTE = """:: Number of Observations - 61 x 12 Number of Variables - 1 Variable name definitions:: TEMPERATURE - average sea surface temperature in degrees Celcius (12 columns, one per month). """ from numpy import recfromtxt, column_stack, array from pandas import DataFrame from statsmodels.datasets.utils import Dataset from os.path import dirname, abspath def load(): """ Load the El Nino data and return a Dataset class. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. Notes ----- The elnino Dataset instance does not contain endog and exog attributes. """ data = _get_data() names = data.dtype.names dataset = Dataset(data=data, names=names) return dataset def load_pandas(): dataset = load() dataset.data = DataFrame(dataset.data) return dataset def _get_data(): filepath = dirname(abspath(__file__)) data = recfromtxt(open(filepath + '/elnino.csv', 'rb'), delimiter=",", names=True, dtype=float) return data
bsd-3-clause
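A short usage sketch for the dataset module above, assuming a statsmodels installation that bundles elnino.csv next to the module:

# Hedged sketch: load the El Nino data through the public statsmodels entry point.
from statsmodels.datasets import elnino

dataset = elnino.load_pandas()
df = dataset.data            # pandas DataFrame built from elnino.csv
print(df.columns.tolist())   # the NOTE above describes 12 monthly temperature columns
print(df.shape)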
giorgiop/scikit-learn
sklearn/datasets/svmlight_format.py
41
16768
"""This module implements a loader and dumper for the svmlight format This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. """ # Authors: Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from contextlib import closing import io import os.path import numpy as np import scipy.sparse as sp from ._svmlight_format import _load_svmlight_file from .. import __version__ from ..externals import six from ..externals.six import u, b from ..externals.six.moves import range, zip from ..utils import check_array from ..utils.fixes import frombuffer_empty def load_svmlight_file(f, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load datasets in the svmlight / libsvm format into sparse CSR matrix This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. Parsing a text based source can be expensive. When working on repeatedly on the same dataset, it is recommended to wrap this loader with joblib.Memory.cache to store a memmapped backup of the CSR results of the first call and benefit from the near instantaneous loading of memmapped structures for the subsequent calls. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. This implementation is written in Cython and is reasonably fast. However, a faster API-compatible loader is also available at: https://github.com/mblondel/svmlight-loader Parameters ---------- f : {str, file-like, int} (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. A file-like or file descriptor will not be closed by this function. A file-like object must be opened in binary mode. n_features : int or None The number of features to use. If None, it will be inferred. This argument is useful to load several files that are subsets of a bigger sliced dataset: each subset might not have examples of every feature, hence the inferred shape might vary from one slice to another. multilabel : boolean, optional, default False Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based : boolean or "auto", optional, default "auto" Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. 
query_id : boolean, default False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- X : scipy.sparse matrix of shape (n_samples, n_features) y : ndarray of shape (n_samples,), or, in the multilabel a list of tuples of length n_samples. query_id : array of shape (n_samples,) query_id for each sample. Only returned when query_id is set to True. See also -------- load_svmlight_files: similar function for loading multiple files in this format, enforcing the same number of features/columns on all of them. Examples -------- To use joblib.Memory to cache the svmlight file:: from sklearn.externals.joblib import Memory from sklearn.datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache def get_data(): data = load_svmlight_file("mysvmlightfile") return data[0], data[1] X, y = get_data() """ return tuple(load_svmlight_files([f], n_features, dtype, multilabel, zero_based, query_id)) def _gen_open(f): if isinstance(f, int): # file descriptor return io.open(f, "rb", closefd=False) elif not isinstance(f, six.string_types): raise TypeError("expected {str, int, file-like}, got %s" % type(f)) _, ext = os.path.splitext(f) if ext == ".gz": import gzip return gzip.open(f, "rb") elif ext == ".bz2": from bz2 import BZ2File return BZ2File(f, "rb") else: return open(f, "rb") def _open_and_load(f, dtype, multilabel, zero_based, query_id): if hasattr(f, "read"): actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # XXX remove closing when Python 2.7+/3.1+ required else: with closing(_gen_open(f)) as f: actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # convert from array.array, give data the right dtype if not multilabel: labels = frombuffer_empty(labels, np.float64) data = frombuffer_empty(data, actual_dtype) indices = frombuffer_empty(ind, np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) # never empty query = frombuffer_empty(query, np.int64) data = np.asarray(data, dtype=dtype) # no-op for float{32,64} return data, indices, indptr, labels, query def load_svmlight_files(files, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load dataset from multiple files in SVMlight format This function is equivalent to mapping load_svmlight_file over a list of files, except that the results are concatenated into a single, flat list and the samples vectors are constrained to all have the same number of features. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. Parameters ---------- files : iterable over {str, file-like, int} (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. File-likes and file descriptors will not be closed by this function. File-like objects must be opened in binary mode. n_features : int or None The number of features to use. 
If None, it will be inferred from the maximum column index occurring in any of the files. This can be set to a higher value than the actual number of features in any of the input files, but setting it to a lower value will cause an exception to be raised. multilabel : boolean, optional Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based : boolean or "auto", optional Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. query_id : boolean, defaults to False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- [X1, y1, ..., Xn, yn] where each (Xi, yi) pair is the result from load_svmlight_file(files[i]). If query_id is set to True, this will return instead [X1, y1, q1, ..., Xn, yn, qn] where (Xi, yi, qi) is the result from load_svmlight_file(files[i]) Notes ----- When fitting a model to a matrix X_train and evaluating it against a matrix X_test, it is essential that X_train and X_test have the same number of features (X_train.shape[1] == X_test.shape[1]). This may not be the case if you load the files individually with load_svmlight_file. See also -------- load_svmlight_file """ r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id)) for f in files] if (zero_based is False or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)): for ind in r: indices = ind[1] indices -= 1 n_f = max(ind[1].max() for ind in r) + 1 if n_features is None: n_features = n_f elif n_features < n_f: raise ValueError("n_features was set to {}," " but input file contains {} features" .format(n_features, n_f)) result = [] for data, indices, indptr, y, query_values in r: shape = (indptr.shape[0] - 1, n_features) X = sp.csr_matrix((data, indices, indptr), shape) X.sort_indices() result += X, y if query_id: result.append(query_values) return result def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): X_is_sp = int(hasattr(X, "tocsr")) y_is_sp = int(hasattr(y, "tocsr")) if X.dtype.kind == 'i': value_pattern = u("%d:%d") else: value_pattern = u("%d:%.16g") if y.dtype.kind == 'i': label_pattern = u("%d") else: label_pattern = u("%.16g") line_pattern = u("%s") if query_id is not None: line_pattern += u(" qid:%d") line_pattern += u(" %s\n") if comment: f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__)) f.write(b("# Column indices are %s-based\n" % ["zero", "one"][one_based])) f.write(b("#\n")) f.writelines(b("# %s\n" % line) for line in comment.splitlines()) for i in range(X.shape[0]): if X_is_sp: span = slice(X.indptr[i], X.indptr[i + 1]) row = zip(X.indices[span], X.data[span]) else: nz = X[i] != 0 row = zip(np.where(nz)[0], X[i, nz]) s = " ".join(value_pattern % (j + one_based, x) for j, x in row) if multilabel: if y_is_sp: nz_labels = y[i].nonzero()[1] else: nz_labels = np.where(y[i] != 0)[0] labels_str = ",".join(label_pattern % j for j in nz_labels) else: if y_is_sp: labels_str = label_pattern % y.data[i] else: labels_str = label_pattern % y[i] if query_id is not 
None: feat = (labels_str, query_id[i], s) else: feat = (labels_str, s) f.write((line_pattern % feat).encode('ascii')) def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None, multilabel=False): """Dump the dataset in svmlight / libsvm file format. This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)] Target values. Class labels must be an integer or float, or array-like objects of integer or float for multilabel classifications. f : string or file-like in binary mode If string, specifies the path that will contain the data. If file-like, data will be written to f. f should be opened in binary mode. zero_based : boolean, optional Whether column indices should be written zero-based (True) or one-based (False). comment : string, optional Comment to insert at the top of the file. This should be either a Unicode string, which will be encoded as UTF-8, or an ASCII byte string. If a comment is given, then it will be preceded by one that identifies the file as having been dumped by scikit-learn. Note that not all tools grok comments in SVMlight files. query_id : array-like, shape = [n_samples] Array containing pairwise preference constraints (qid in svmlight format). multilabel : boolean, optional Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) .. versionadded:: 0.17 parameter *multilabel* to support multilabel datasets. """ if comment is not None: # Convert comment string to list of lines in UTF-8. # If a byte string is passed, then check whether it's ASCII; # if a user wants to get fancy, they'll have to decode themselves. # Avoid mention of str and unicode types for Python 3.x compat. if isinstance(comment, bytes): comment.decode("ascii") # just for the exception else: comment = comment.encode("utf-8") if six.b("\0") in comment: raise ValueError("comment string contains NUL byte") yval = check_array(y, accept_sparse='csr', ensure_2d=False) if sp.issparse(yval): if yval.shape[1] != 1 and not multilabel: raise ValueError("expected y of shape (n_samples, 1)," " got %r" % (yval.shape,)) else: if yval.ndim != 1 and not multilabel: raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,)) Xval = check_array(X, accept_sparse='csr') if Xval.shape[0] != yval.shape[0]: raise ValueError("X.shape[0] and y.shape[0] should be the same, got" " %r and %r instead." % (Xval.shape[0], yval.shape[0])) # We had some issues with CSR matrices with unsorted indices (e.g. #1501), # so sort them here, but first make sure we don't modify the user's X. # TODO We can do this cheaper; sorted_indices copies the whole matrix. 
if yval is y and hasattr(yval, "sorted_indices"): y = yval.sorted_indices() else: y = yval if hasattr(y, "sort_indices"): y.sort_indices() if Xval is X and hasattr(Xval, "sorted_indices"): X = Xval.sorted_indices() else: X = Xval if hasattr(X, "sort_indices"): X.sort_indices() if query_id is not None: query_id = np.asarray(query_id) if query_id.shape[0] != y.shape[0]: raise ValueError("expected query_id of shape (n_samples,), got %r" % (query_id.shape,)) one_based = not zero_based if hasattr(f, "write"): _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) else: with open(f, "wb") as f: _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
bsd-3-clause
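A hedged round-trip sketch for the svmlight dumper/loader documented above, assuming both functions are importable from sklearn.datasets and a temporary file is an acceptable target:

# Dump a tiny dense matrix to svmlight format, then load it back as a CSR matrix.
import numpy as np
from tempfile import NamedTemporaryFile
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[0.0, 1.5, 0.0],
              [2.0, 0.0, 3.0]])
y = np.array([0, 1])

with NamedTemporaryFile(suffix=".svmlight", delete=False) as tmp:
    dump_svmlight_file(X, y, tmp, zero_based=True)   # file object opened in binary mode
    path = tmp.name

X2, y2 = load_svmlight_file(path, n_features=3, zero_based=True)
print(X2.toarray())   # zero entries were not stored, but the dense view matches X
print(y2)             # labels come back as float64: [0. 1.]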
ieguinoa/tools-iuc
tools/fsd/fsd.py
12
44897
#!/usr/bin/env python # Family size distribution of SSCSs # # Author: Monika Heinzl, Johannes-Kepler University Linz (Austria) # Contact: monika.heinzl@edumail.at # # Takes at least one TABULAR file with tags before the alignment to the SSCS, but up to 4 files can be provided, as input. # The program produces a plot which shows the distribution of family sizes of the all SSCSs from the input files and # a tabular file with the data of the plot, as well as a TXT file with all tags of the DCS and their family sizes. # If only one file is provided, then a family size distribution, which is separated after SSCSs without a partner and DCSs, is produced. # Whereas a family size distribution with multiple data in one plot is produced, when more than one file (up to 4) is given. # USAGE: python FSD_Galaxy_1.4_commandLine_FINAL.py --inputFile1 filename --inputName1 filename --inputFile2 filename2 --inputName2 filename2 --inputFile3 filename3 --inputName3 filename3 --inputFile4 filename4 --inputName4 filename4 --log_axis --output_tabular outptufile_name_tabular --output_pdf outptufile_name_pdf import argparse import sys import matplotlib.pyplot as plt import numpy from matplotlib.backends.backend_pdf import PdfPages plt.switch_backend('agg') def readFileReferenceFree(file): with open(file, 'r') as dest_f: data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype=str) return(data_array) def make_argparser(): parser = argparse.ArgumentParser(description='Family Size Distribution of duplex sequencing data') parser.add_argument('--inputFile1', help='Tabular File with three columns: ab or ba, tag and family size.') parser.add_argument('--inputName1') parser.add_argument('--inputFile2', default=None, help='Tabular File with three columns: ab or ba, tag and family size.') parser.add_argument('--inputName2') parser.add_argument('--inputFile3', default=None, help='Tabular File with three columns: ab or ba, tag and family size.') parser.add_argument('--inputName3') parser.add_argument('--inputFile4', default=None, help='Tabular File with three columns: ab or ba, tag and family size.') parser.add_argument('--inputName4') parser.add_argument('--log_axis', action="store_false", help='Transform y axis in log scale.') parser.add_argument('--rel_freq', action="store_false", help='If False, the relative frequencies are displayed.') parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf file.') parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the tabular file.') return parser def compare_read_families(argv): parser = make_argparser() args = parser.parse_args(argv[1:]) firstFile = args.inputFile1 name1 = args.inputName1 secondFile = args.inputFile2 name2 = args.inputName2 thirdFile = args.inputFile3 name3 = args.inputName3 fourthFile = args.inputFile4 name4 = args.inputName4 log_axis = args.log_axis rel_freq = args.rel_freq title_file = args.output_tabular title_file2 = args.output_pdf sep = "\t" plt.rc('figure', figsize=(11.69, 8.27)) # A4 format plt.rcParams['patch.edgecolor'] = "black" plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 list_to_plot = [] label = [] data_array_list = [] list_to_plot_original = [] colors = [] bins = numpy.arange(1, 22) with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf: fig = plt.figure() fig.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0) fig2 = 
plt.figure() fig2.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0) if firstFile is not None: file1 = readFileReferenceFree(firstFile) integers = numpy.array(file1[:, 0]).astype(int) # keep original family sizes list_to_plot_original.append(integers) colors.append("#0000FF") # for plot: replace all big family sizes by 22 data1 = numpy.clip(integers, bins[0], bins[-1]) name1 = name1.split(".tabular")[0] if len(name1) > 40: name1 = name1[:40] list_to_plot.append(data1) label.append(name1) data_array_list.append(file1) legend = "\n\n\n{}".format(name1) fig.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure) fig2.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure) legend1 = "singletons:\nnr. of tags\n{:,} ({:.3f})".format(numpy.bincount(data1)[1], float(numpy.bincount(data1)[1]) / len(data1)) fig.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure) fig2.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure) legend3b = "PE reads\n{:,} ({:.3f})".format(numpy.bincount(data1)[1], float(numpy.bincount(data1)[1]) / sum(integers)) fig.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure) fig2.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure) legend4 = "family size > 20:\nnr. of tags\n{:,} ({:.3f})".format(len(integers[integers > 20]), float(len(integers[integers > 20])) / len(integers)) fig.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure) fig2.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure) legend5 = "PE reads\n{:,} ({:.3f})".format(sum(integers[integers > 20]), float(sum(integers[integers > 20])) / sum(integers)) fig.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure) fig2.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure) legend6 = "total nr. 
of\ntags\n{:,}".format(len(data1)) fig.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure) fig2.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure) legend6b = "PE reads\n{:,}".format(sum(integers)) fig.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure) fig2.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure) if secondFile is not None: file2 = readFileReferenceFree(secondFile) integers2 = numpy.array(file2[:, 0]).astype(int) # keep original family sizes list_to_plot_original.append(integers2) colors.append("#298A08") data2 = numpy.clip(integers2, bins[0], bins[-1]) list_to_plot.append(data2) name2 = name2.split(".tabular")[0] if len(name2) > 40: name2 = name2[:40] label.append(name2) data_array_list.append(file2) fig.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure) fig2.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure) legend1 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / len(data2)) fig.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure) fig2.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure) legend3 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / sum(integers2)) fig.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure) fig2.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure) legend4 = "{:,} ({:.3f})".format(len(integers2[integers2 > 20]), float(len(integers2[integers2 > 20])) / len(integers2)) fig.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure) fig2.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure) legend5 = "{:,} ({:.3f})".format(sum(integers2[integers2 > 20]), float(sum(integers2[integers2 > 20])) / sum(integers2)) fig.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure) fig2.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure) legend6 = "{:,}".format(len(data2)) fig.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure) fig2.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure) legend6b = "{:,}".format(sum(integers2)) fig.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure) fig2.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure) if thirdFile is not None: file3 = readFileReferenceFree(thirdFile) integers3 = numpy.array(file3[:, 0]).astype(int) # keep original family sizes list_to_plot_original.append(integers3) colors.append("#DF0101") data3 = numpy.clip(integers3, bins[0], bins[-1]) list_to_plot.append(data3) name3 = name3.split(".tabular")[0] if len(name3) > 40: name3 = name3[:40] label.append(name3) data_array_list.append(file3) fig.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure) fig2.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure) legend1 = "{:,} ({:.3f})".format(numpy.bincount(data3)[1], float(numpy.bincount(data3)[1]) / len(data3)) fig.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure) fig2.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure) legend3b = "{:,} ({:.3f})".format(numpy.bincount(data3)[1], float(numpy.bincount(data3)[1]) / sum(integers3)) fig.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure) fig2.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure) legend4 = "{:,} ({:.3f})".format(len(integers3[integers3 > 20]), float(len(integers3[integers3 > 20])) / len(integers3)) fig.text(0.58, 0.07, legend4, size=10, 
transform=plt.gcf().transFigure) fig2.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure) legend5 = "{:,} ({:.3f})".format(sum(integers3[integers3 > 20]), float(sum(integers3[integers3 > 20])) / sum(integers3)) fig.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure) fig2.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure) legend6 = "{:,}".format(len(data3)) fig.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure) fig2.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure) legend6b = "{:,}".format(sum(integers3)) fig.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure) fig2.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure) if fourthFile is not None: file4 = readFileReferenceFree(fourthFile) integers4 = numpy.array(file4[:, 0]).astype(int) # keep original family sizes list_to_plot_original.append(integers4) colors.append("#04cec7") data4 = numpy.clip(integers4, bins[0], bins[-1]) list_to_plot.append(data4) name4 = name4.split(".tabular")[0] if len(name4) > 40: name4 = name4[:40] label.append(name4) data_array_list.append(file4) fig.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure) fig2.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure) legend1 = "{:,} ({:.3f})".format(numpy.bincount(data4)[1], float(numpy.bincount(data4)[1]) / len(data4)) fig.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure) fig2.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure) legend3b = "{:,} ({:.3f})".format(numpy.bincount(data4)[1], float(numpy.bincount(data4)[1]) / sum(integers4)) fig.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure) fig2.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure) legend4 = "{:,} ({:.3f})".format(len(integers4[integers4 > 20]), float(len(integers4[integers4 > 20])) / len(integers4)) fig.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure) fig2.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure) legend5 = "{:,} ({:.3f})".format(sum(integers4[integers4 > 20]), float(sum(integers4[integers4 > 20])) / sum(integers4)) fig.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure) fig2.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure) legend6 = "{:,}".format(len(data4)) fig.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure) fig2.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure) legend6b = "{:,}".format(sum(integers4)) fig.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure) fig2.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure) list_to_plot2 = list_to_plot if rel_freq: ylab = "Relative Frequency" else: ylab = "Absolute Frequency" # PLOT FSD based on tags fig.suptitle('Family Size Distribution (FSD) based on families', fontsize=14) ax = fig.add_subplot(1, 1, 1) ticks = numpy.arange(1, 22, 1) ticks1 = [str(_) for _ in ticks] ticks1[len(ticks1) - 1] = ">20" ax.set_xticks([], []) if rel_freq: w = [numpy.zeros_like(data) + 1. 
/ len(data) for data in list_to_plot2] counts = ax.hist(list_to_plot2, weights=w, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", color=colors, linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8) ax.set_ylim(0, 1.07) else: counts = ax.hist(list_to_plot2, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8, color=colors) ax.set_xticks(numpy.array(ticks)) ax.set_xticklabels(ticks1) ax.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1)) ax.set_ylabel(ylab, fontsize=14) ax.set_xlabel("Family size", fontsize=14) if log_axis: ax.set_yscale('log') ax.grid(b=True, which="major", color="#424242", linestyle=":") ax.margins(0.01, None) pdf.savefig(fig) # PLOT FSD based on PE reads fig2.suptitle('Family Size Distribution (FSD) based on PE reads', fontsize=14) ax2 = fig2.add_subplot(1, 1, 1) ticks = numpy.arange(1, 22) ticks1 = [str(_) for _ in ticks] ticks1[len(ticks1) - 1] = ">20" reads = [] reads_rel = [] barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1) ax2.set_xticks([], []) for i in range(len(list_to_plot2)): x = list(numpy.arange(1, 22).astype(float)) unique, c = numpy.unique(list_to_plot2[i], return_counts=True) y = unique * c if sum(list_to_plot_original[i] > 20) > 0: y[len(y) - 1] = sum(list_to_plot_original[i][list_to_plot_original[i] > 20]) y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))] reads.append(y) reads_rel.append(list(numpy.float_(y)) / sum(y)) if len(list_to_plot2) == 1: x = [xi * 0.5 for xi in x] w = 0.4 else: x = [xi + barWidth for xi in x] w = 1. / (len(list_to_plot) + 1) if rel_freq: ax2.bar(x, list(numpy.float_(y)) / numpy.sum(y), align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i]) ax2.set_ylim(0, 1.07) else: ax2.bar(x, y, align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i]) if i == len(list_to_plot2) - 1: barWidth += 1. / (len(list_to_plot) + 1) + 1. / (len(list_to_plot) + 1) else: barWidth += 1. 
/ (len(list_to_plot) + 1) ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1)) if len(list_to_plot2) == 1: ax2.set_xticks(numpy.array([xi + 0.2 for xi in x])) else: ax2.set_xticks(numpy.array(ticks)) ax2.set_xticklabels(ticks1) ax2.set_xlabel("Family size", fontsize=14) ax2.set_ylabel(ylab, fontsize=14) if log_axis: ax2.set_yscale('log') ax2.grid(b=True, which="major", color="#424242", linestyle=":") ax2.margins(0.01, None) pdf.savefig(fig2) plt.close() # write data to CSV file tags counts = [numpy.bincount(di, minlength=22)[1:] for di in list_to_plot2] # original counts of family sizes output_file.write("Values from family size distribution with all datasets based on families\n") output_file.write("\nFamily size") for i in label: output_file.write("{}{}".format(sep, i)) output_file.write("\n") j = 0 for fs in bins: if fs == 21: fs = ">20" else: fs = "={}".format(fs) output_file.write("FS{}{}".format(fs, sep)) for n in range(len(label)): output_file.write("{}{}".format(int(counts[n][j]), sep)) output_file.write("\n") j += 1 output_file.write("sum{}".format(sep)) for i in counts: output_file.write("{}{}".format(int(sum(i)), sep)) # write data to CSV file PE reads output_file.write("\n\nValues from family size distribution with all datasets based on PE reads\n") output_file.write("\nFamily size") for i in label: output_file.write("{}{}".format(sep, i)) output_file.write("\n") j = 0 for fs in bins: if fs == 21: fs = ">20" else: fs = "={}".format(fs) output_file.write("FS{}{}".format(fs, sep)) if len(label) == 1: output_file.write("{}{}".format(int(reads[0][j]), sep)) else: for n in range(len(label)): output_file.write("{}{}".format(int(reads[n][j]), sep)) output_file.write("\n") j += 1 output_file.write("sum{}".format(sep)) if len(label) == 1: output_file.write("{}{}".format(int(sum(numpy.concatenate(reads))), sep)) else: for i in reads: output_file.write("{}{}".format(int(sum(i)), sep)) output_file.write("\n") # Family size distribution after DCS and SSCS for dataset, data_o, name_file in zip(list_to_plot, data_array_list, label): tags = numpy.array(data_o[:, 2]) seq = numpy.array(data_o[:, 1]) data = numpy.array(dataset) data_o = numpy.array(data_o[:, 0]).astype(int) # find all unique tags and get the indices for ALL tags, but only once u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True) d = u[c > 1] # get family sizes, tag for duplicates duplTags_double = data[numpy.in1d(seq, d)] duplTags_double_o = data_o[numpy.in1d(seq, d)] duplTags = duplTags_double[0::2] # ab of DCS duplTags_o = duplTags_double_o[0::2] # ab of DCS duplTagsBA = duplTags_double[1::2] # ba of DCS duplTagsBA_o = duplTags_double_o[1::2] # ba of DCS # get family sizes for SSCS with no partner ab = numpy.where(tags == "ab")[0] abSeq = seq[ab] ab_o = data_o[ab] ab = data[ab] ba = numpy.where(tags == "ba")[0] baSeq = seq[ba] ba_o = data_o[ba] ba = data[ba] dataAB = ab[numpy.in1d(abSeq, d, invert=True)] dataAB_o = ab_o[numpy.in1d(abSeq, d, invert=True)] dataBA = ba[numpy.in1d(baSeq, d, invert=True)] dataBA_o = ba_o[numpy.in1d(baSeq, d, invert=True)] list1 = [duplTags_double, dataAB, dataBA] # list for plotting list1_o = [duplTags_double_o, dataAB_o, dataBA_o] # list for plotting # information for family size >= 3 dataAB_FS3 = dataAB[dataAB >= 3] dataAB_FS3_o = dataAB_o[dataAB_o >= 3] dataBA_FS3 = dataBA[dataBA >= 3] dataBA_FS3_o = dataBA_o[dataBA_o >= 3] duplTags_FS3 = duplTags[(duplTags >= 3) & (duplTagsBA >= 3)] # ab+ba with FS>=3 duplTags_FS3_BA = 
duplTagsBA[(duplTags >= 3) & (duplTagsBA >= 3)] # ba+ab with FS>=3 duplTags_double_FS3 = len(duplTags_FS3) + len(duplTags_FS3_BA) # both ab and ba strands with FS>=3 # original FS duplTags_FS3_o = duplTags_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ab+ba with FS>=3 duplTags_FS3_BA_o = duplTagsBA_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ba+ab with FS>=3 duplTags_double_FS3_o = sum(duplTags_FS3_o) + sum(duplTags_FS3_BA_o) # both ab and ba strands with FS>=3 fig = plt.figure() plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0) if rel_freq: w = [numpy.zeros_like(dj) + 1. / len(numpy.concatenate(list1)) for dj in list1] plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], weights=w, edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8) plt.ylim(0, 1.07) else: plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8) # tick labels of x axis ticks = numpy.arange(1, 22, 1) ticks1 = [str(_) for _ in ticks] ticks1[len(ticks1) - 1] = ">20" plt.xticks(numpy.array(ticks), ticks1) singl = len(data_o[data_o == 1]) last = len(data_o[data_o > 20]) # large families if log_axis: plt.yscale('log') plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True) plt.title("{}: FSD based on families".format(name_file), fontsize=14) plt.xlabel("Family size", fontsize=14) plt.ylabel(ylab, fontsize=14) plt.margins(0.01, None) plt.grid(b=True, which="major", color="#424242", linestyle=":") # extra information beneath the plot legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags=" plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA), len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)), (len(ab) + len(ba))) plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o), sum(duplTags_o), sum(duplTags_double_o), (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), (sum(ab_o) + sum(ba_o))) plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure) legend = "rel. freq. 
of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format( float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), (len(dataAB) + len(dataBA) + len(duplTags))) plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)), float(len(dataBA)) / (len(ab) + len(ba)), float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), (len(ab) + len(ba))) plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend1 = "\nsingletons:\nfamily size > 20:" plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure) legend4 = "{:,}\n{:,}".format(singl, last) plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure) legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data)) plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20])) plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o)) plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format( float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o))) plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format( float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)), float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)), float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)), float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o))) plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure) pdf.savefig(fig) plt.close() # PLOT FSD based on PE reads fig3 = plt.figure() plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0) fig3.suptitle("{}: FSD based on PE reads".format(name_file), fontsize=14) ax2 = fig3.add_subplot(1, 1, 1) ticks = numpy.arange(1, 22) ticks1 = [str(_) for _ in ticks] ticks1[len(ticks1) - 1] = ">20" reads = [] reads_rel = [] # barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. 
/ (len(list_to_plot) + 1) ax2.set_xticks([], []) list_y = [] label = ["duplex", "ab", "ba"] col = ["#FF0000", "#5FB404", "#FFBF00"] for i in range(len(list1)): x = list(numpy.arange(1, 22).astype(float)) unique, c = numpy.unique(list1[i], return_counts=True) y = unique * c if sum(list1_o[i] > 20) > 0: y[len(y) - 1] = sum(list1_o[i][list1_o[i] > 20]) y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))] reads.append(y) reads_rel.append(list(numpy.float_(y)) / sum(numpy.concatenate(list1_o))) if rel_freq: y = list(numpy.float_(y)) / sum(numpy.concatenate(list1_o)) ax2.set_ylim(0, 1.07) else: y = y list_y.append(y) if i == 0: ax2.bar(x, y, align="center", width=0.8, edgecolor="black", label=label[0], linewidth=1, alpha=1, color=col[0]) elif i == 1: ax2.bar(x, y, bottom=list_y[i - 1], align="center", width=0.8, edgecolor="black", label=label[1], linewidth=1, alpha=1, color=col[1]) elif i == 2: bars = numpy.add(list_y[0], list_y[1]).tolist() ax2.bar(x, y, bottom=bars, align="center", width=0.8, edgecolor="black", label=label[2], linewidth=1, alpha=1, color=col[2]) ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1)) singl = len(data_o[data_o == 1]) last = len(data_o[data_o > 20]) # large families ax2.set_xticks(numpy.array(ticks)) ax2.set_xticklabels(ticks1) ax2.set_xlabel("Family size", fontsize=14) ax2.set_ylabel(ylab, fontsize=14) if log_axis: ax2.set_yscale('log') ax2.grid(b=True, which="major", color="#424242", linestyle=":") ax2.margins(0.01, None) # extra information beneath the plot legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags=" plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA), len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)), (len(ab) + len(ba))) plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o), sum(duplTags_o), sum(duplTags_double_o), (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), (sum(ab_o) + sum(ba_o))) plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure) legend = "rel. freq. 
of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format( float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), (len(dataAB) + len(dataBA) + len(duplTags))) plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)), float(len(dataBA)) / (len(ab) + len(ba)), float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), (len(ab) + len(ba))) plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend1 = "\nsingletons:\nfamily size > 20:" plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure) legend4 = "{:,}\n{:,}".format(singl, last) plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure) legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data)) plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20])) plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o)) plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure) legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format( float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o))) plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure) legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format( float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)), float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)), float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)), float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o))) plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure) pdf.savefig(fig3) plt.close() # write same information to a csv file count = numpy.bincount(data_o) # original counts of family sizes output_file.write("\nDataset:{}{}\n".format(sep, name_file)) output_file.write("max. family size:{}{}\n".format(sep, max(data_o))) output_file.write("absolute frequency:{}{}\n".format(sep, count[len(count) - 1])) output_file.write("relative frequency:{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count))) output_file.write("median family size:{}{}\n".format(sep, numpy.median(numpy.array(data_o)))) output_file.write("mean family size:{}{}\n\n".format(sep, numpy.mean(numpy.array(data_o)))) output_file.write( "{}singletons:{}{}{}family size > 20:{}{}{}{}length of dataset:\n".format(sep, sep, sep, sep, sep, sep, sep, sep)) output_file.write( "{}nr. of tags{}rel. freq of tags{}rel.freq of PE reads{}nr. of tags{}rel. freq of tags{}nr. of PE reads{}rel. freq of PE reads{}total nr. of tags{}total nr. 
of PE reads\n".format( sep, sep, sep, sep, sep, sep, sep, sep, sep)) output_file.write("{}{}{}{}{:.3f}{}{:.3f}{}{}{}{:.3f}{}{}{}{:.3f}{}{}{}{}\n\n".format( name_file, sep, singl, sep, float(singl) / len(data), sep, float(singl) / sum(data_o), sep, last, sep, float(last) / len(data), sep, sum(data_o[data_o > 20]), sep, float(sum(data_o[data_o > 20])) / sum(data_o), sep, len(data), sep, sum(data_o))) # information for FS >= 1 output_file.write( "The unique frequencies were calculated from the dataset where the tags occured only once (=ab without DCS, ba without DCS)\n" "Whereas the total frequencies were calculated from the whole dataset (=including the DCS).\n\n") output_file.write( "FS >= 1{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep, sep, sep, sep, sep)) output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep)) output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format( sep, len(dataAB), sep, sum(dataAB_o), sep, float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep, float(len(dataAB)) / (len(ab) + len(ba)), sep, float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)))) output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format( sep, len(dataBA), sep, sum(dataBA_o), sep, float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep, float(len(dataBA)) / (len(ab) + len(ba)), sep, float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)))) output_file.write( "DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format( sep, len(duplTags), len(duplTags_double), sep, sum(duplTags_o), sum(duplTags_double_o), sep, float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep, float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), sep, float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)), float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)))) output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format( sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep, (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(ab) + len(ba)), sep, (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, (sum(ab_o) + sum(ba_o)))) # information for FS >= 3 output_file.write( "\nFS >= 3{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. 
freq of PE reads:\n".format(sep, sep, sep, sep, sep)) output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep)) output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format( sep, len(dataAB_FS3), sep, sum(dataAB_FS3_o), sep, float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(dataAB_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + duplTags_double_FS3), sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep, float(sum(dataAB_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o))) output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format( sep, len(dataBA_FS3), sep, sum(dataBA_FS3_o), sep, float(len(dataBA_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(dataBA_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + duplTags_double_FS3), sep, float(sum(dataBA_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep, float(sum(dataBA_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o))) output_file.write( "DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format( sep, len(duplTags_FS3), duplTags_double_FS3, sep, sum(duplTags_FS3_o), duplTags_double_FS3_o, sep, float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3), float(duplTags_double_FS3) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3), sep, float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep, float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o), float(duplTags_double_FS3_o) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o))) output_file.write("total nr. 
of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format( sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3), sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o))) counts = [numpy.bincount(dk, minlength=22)[1:] for dk in list1] # original counts of family sizes output_file.write("\nValues from family size distribution based on families\n") output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep)) j = 0 for fs in bins: if fs == 21: fs = ">20" else: fs = "={}".format(fs) output_file.write("FS{}{}".format(fs, sep)) for n in range(3): output_file.write("{}{}".format(int(counts[n][j]), sep)) output_file.write("{}\n".format(counts[0][j] + counts[1][j] + counts[2][j])) j += 1 output_file.write("sum{}".format(sep)) for i in counts: output_file.write("{}{}".format(int(sum(i)), sep)) output_file.write("{}\n".format(sum(counts[0] + counts[1] + counts[2]))) output_file.write("\nValues from family size distribution based on PE reads\n") output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep)) j = 0 for fs in bins: if fs == 21: fs = ">20" else: fs = "={}".format(fs) output_file.write("FS{}{}".format(fs, sep)) for n in range(3): output_file.write("{}{}".format(int(reads[n][j]), sep)) output_file.write("{}\n".format(reads[0][j] + reads[1][j] + reads[2][j])) j += 1 output_file.write("sum{}".format(sep)) for i in reads: output_file.write("{}{}".format(int(sum(i)), sep)) output_file.write("{}\n".format(sum(reads[0] + reads[1] + reads[2]))) print("Files successfully created!") if __name__ == '__main__': sys.exit(compare_read_families(sys.argv))
mit
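The family-size bookkeeping in the record above (singleton counts, families with more than 20 reads, and their relative frequencies on a per-tag and a per-PE-read basis) is hard to follow inside the long format strings. The following is a minimal, self-contained sketch of the same arithmetic on a synthetic family-size array; the variable names (family_sizes, singletons, large_families) are illustrative and not taken from the tool itself.

import numpy as np

# synthetic family sizes (number of PE reads per tag), purely illustrative
family_sizes = np.array([1, 1, 2, 3, 1, 25, 4, 21, 1, 2])

n_tags = len(family_sizes)          # one entry per tag
n_reads = family_sizes.sum()        # total number of PE reads

singletons = family_sizes == 1      # families consisting of a single read
large_families = family_sizes > 20  # families with more than 20 reads

# relative frequencies on a per-tag basis
rel_tags_singletons = singletons.sum() / float(n_tags)
rel_tags_large = large_families.sum() / float(n_tags)

# relative frequencies on a per-read basis (reads carried by those families)
rel_reads_singletons = family_sizes[singletons].sum() / float(n_reads)
rel_reads_large = family_sizes[large_families].sum() / float(n_reads)

print("tags:  {:.3f} singletons, {:.3f} FS>20".format(rel_tags_singletons, rel_tags_large))
print("reads: {:.3f} singletons, {:.3f} FS>20".format(rel_reads_singletons, rel_reads_large))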
theoryno3/scikit-learn
sklearn/linear_model/tests/test_perceptron.py
375
1815
import numpy as np
import scipy.sparse as sp

from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()


class MyPerceptron(object):

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for t in range(self.n_iter):
            for i in range(n_samples):
                if self.predict(X[i])[0] != y[i]:
                    self.w += y[i] * X[i]
                    self.b += y[i]

    def project(self, X):
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        X = np.atleast_2d(X)
        return np.sign(self.project(X))


def test_perceptron_accuracy():
    for data in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_true(score >= 0.7)


def test_perceptron_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)

    clf2 = Perceptron(n_iter=2, shuffle=False)
    clf2.fit(X, y_bin)

    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())


def test_undefined_methods():
    clf = Perceptron()
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
bsd-3-clause
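The test above pins the behaviour of sklearn.linear_model.Perceptron against a hand-written reference implementation. A minimal usage sketch is shown below; note that the n_iter parameter used in this old test was later renamed to max_iter in scikit-learn, so the sketch uses max_iter and may need adapting to the installed version.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
rng = np.random.RandomState(12)
indices = rng.permutation(len(iris.data))   # shuffle the samples, as the test does
X, y = iris.data[indices], iris.target[indices]

# max_iter replaces the older n_iter parameter used in the test above
clf = Perceptron(max_iter=30, shuffle=False)
clf.fit(X, y)
print("training accuracy: %.3f" % clf.score(X, y))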
pianomania/scikit-learn
examples/mixture/plot_gmm.py
112
3265
""" ================================= Gaussian Mixture Model Ellipsoids ================================= Plot the confidence ellipsoids of a mixture of two Gaussians obtained with Expectation Maximisation (``GaussianMixture`` class) and Variational Inference (``BayesianGaussianMixture`` class models with a Dirichlet process prior). Both models have access to five components with which to fit the data. Note that the Expectation Maximisation model will necessarily use all five components while the Variational Inference model will effectively only use as many as are needed for a good fit. Here we can see that the Expectation Maximisation model splits some components arbitrarily, because it is trying to fit too many components, while the Dirichlet Process model adapts it number of state automatically. This example doesn't show it, as we're in a low-dimensional space, but another advantage of the Dirichlet process model is that it can fit full covariance matrices effectively even when there are less examples per cluster than there are dimensions in the data, due to regularization properties of the inference algorithm. """ import itertools import numpy as np from scipy import linalg import matplotlib.pyplot as plt import matplotlib as mpl from sklearn import mixture color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold', 'darkorange']) def plot_results(X, Y_, means, covariances, index, title): splot = plt.subplot(2, 1, 1 + index) for i, (mean, covar, color) in enumerate(zip( means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2. * np.sqrt(2.) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180. * angle / np.pi # convert to degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) plt.xlim(-9., 5.) plt.ylim(-3., 6.) plt.xticks(()) plt.yticks(()) plt.title(title) # Number of samples per component n_samples = 500 # Generate random sample, two components np.random.seed(0) C = np.array([[0., -0.1], [1.7, .4]]) X = np.r_[np.dot(np.random.randn(n_samples, 2), C), .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])] # Fit a Gaussian mixture with EM using five components gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X) plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0, 'Gaussian Mixture') # Fit a Dirichlet process Gaussian mixture using five components dpgmm = mixture.BayesianGaussianMixture(n_components=5, covariance_type='full').fit(X) plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1, 'Bayesian Gaussian Mixture with a Dirichlet process prior') plt.show()
bsd-3-clause
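The example's docstring makes the point that the EM model spends all of its components while the Dirichlet-process model leaves the unneeded ones with negligible weight. A short sketch that makes this visible numerically, without any plotting, is given below; the 0.01 weight threshold is an arbitrary illustrative cut-off.

import numpy as np
from sklearn import mixture

rng = np.random.RandomState(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(rng.randn(500, 2), C),
          .7 * rng.randn(500, 2) + np.array([-6, 3])]   # two true clusters

gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
                                        covariance_type='full').fit(X)

# EM distributes weight over all five components ...
print("GaussianMixture weights:        ", np.round(gmm.weights_, 3))
# ... while the variational model concentrates it on the ones it needs
print("BayesianGaussianMixture weights:", np.round(dpgmm.weights_, 3))
print("effective components (weight > 0.01):", np.sum(dpgmm.weights_ > 0.01))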
PatrickOReilly/scikit-learn
sklearn/metrics/scorer.py
22
13077
""" The :mod:`sklearn.metrics.scorer` submodule implements a flexible interface for model selection and evaluation using arbitrary score functions. A scorer object is a callable that can be passed to :class:`sklearn.model_selection.GridSearchCV` or :func:`sklearn.model_selection.cross_val_score` as the ``scoring`` parameter, to specify how a model should be evaluated. The signature of the call is ``(estimator, X, y)`` where ``estimator`` is the model to be evaluated, ``X`` is the test data and ``y`` is the ground truth labeling (or ``None`` in the case of unsupervised models). """ # Authors: Andreas Mueller <amueller@ais.uni-bonn.de> # Lars Buitinck # Arnaud Joly <arnaud.v.joly@gmail.com> # License: Simplified BSD from abc import ABCMeta, abstractmethod import numpy as np from . import (r2_score, median_absolute_error, mean_absolute_error, mean_squared_error, accuracy_score, f1_score, roc_auc_score, average_precision_score, precision_score, recall_score, log_loss) from .cluster import adjusted_rand_score from ..utils.multiclass import type_of_target from ..externals import six from ..base import is_regressor class _BaseScorer(six.with_metaclass(ABCMeta, object)): def __init__(self, score_func, sign, kwargs): self._kwargs = kwargs self._score_func = score_func self._sign = sign @abstractmethod def __call__(self, estimator, X, y, sample_weight=None): pass def __repr__(self): kwargs_string = "".join([", %s=%s" % (str(k), str(v)) for k, v in self._kwargs.items()]) return ("make_scorer(%s%s%s%s)" % (self._score_func.__name__, "" if self._sign > 0 else ", greater_is_better=False", self._factory_args(), kwargs_string)) def _factory_args(self): """Return non-default make_scorer arguments for repr.""" return "" class _PredictScorer(_BaseScorer): def __call__(self, estimator, X, y_true, sample_weight=None): """Evaluate predicted target values for X relative to y_true. Parameters ---------- estimator : object Trained estimator to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to estimator.predict. y_true : array-like Gold standard target values for X. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_pred = estimator.predict(X) if sample_weight is not None: return self._sign * self._score_func(y_true, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y_true, y_pred, **self._kwargs) class _ProbaScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate predicted probabilities for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not probabilities. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. 
""" y_pred = clf.predict_proba(X) if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_proba=True" class _ThresholdScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate decision function output for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have either a decision_function method or a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.decision_function or clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not decision function values. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_type = type_of_target(y) if y_type not in ("binary", "multilabel-indicator"): raise ValueError("{0} format is not supported".format(y_type)) if is_regressor(clf): y_pred = clf.predict(X) else: try: y_pred = clf.decision_function(X) # For multi-output multi-class estimator if isinstance(y_pred, list): y_pred = np.vstack(p for p in y_pred).T except (NotImplementedError, AttributeError): y_pred = clf.predict_proba(X) if y_type == "binary": y_pred = y_pred[:, 1] elif isinstance(y_pred, list): y_pred = np.vstack([p[:, -1] for p in y_pred]).T if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_threshold=True" def get_scorer(scoring): if isinstance(scoring, six.string_types): try: scorer = SCORERS[scoring] except KeyError: raise ValueError('%r is not a valid scoring value. ' 'Valid options are %s' % (scoring, sorted(SCORERS.keys()))) else: scorer = scoring return scorer def _passthrough_scorer(estimator, *args, **kwargs): """Function that wraps estimator.score""" return estimator.score(*args, **kwargs) def check_scoring(estimator, scoring=None, allow_none=False): """Determine scorer from user options. A TypeError will be thrown if the estimator cannot be scored. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. allow_none : boolean, optional, default: False If no scoring is specified and the estimator has no score function, we can either return None or raise an exception. Returns ------- scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. """ has_scoring = scoring is not None if not hasattr(estimator, 'fit'): raise TypeError("estimator should be an estimator implementing " "'fit' method, %r was passed" % estimator) elif has_scoring: return get_scorer(scoring) elif hasattr(estimator, 'score'): return _passthrough_scorer elif allow_none: return None else: raise TypeError( "If no scoring is specified, the estimator passed should " "have a 'score' method. The estimator %r does not." 
% estimator) def make_scorer(score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs): """Make a scorer from a performance metric or loss function. This factory function wraps scoring functions for use in GridSearchCV and cross_val_score. It takes a score function, such as ``accuracy_score``, ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision`` and returns a callable that scores an estimator's output. Read more in the :ref:`User Guide <scoring>`. Parameters ---------- score_func : callable, Score function (or loss function) with signature ``score_func(y, y_pred, **kwargs)``. greater_is_better : boolean, default=True Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. needs_proba : boolean, default=False Whether score_func requires predict_proba to get probability estimates out of a classifier. needs_threshold : boolean, default=False Whether score_func takes a continuous decision certainty. This only works for binary classification using estimators that have either a decision_function or predict_proba method. For example ``average_precision`` or the area under the roc curve can not be computed using discrete predictions alone. **kwargs : additional arguments Additional parameters to be passed to score_func. Returns ------- scorer : callable Callable object that returns a scalar score; greater is better. Examples -------- >>> from sklearn.metrics import fbeta_score, make_scorer >>> ftwo_scorer = make_scorer(fbeta_score, beta=2) >>> ftwo_scorer make_scorer(fbeta_score, beta=2) >>> from sklearn.model_selection import GridSearchCV >>> from sklearn.svm import LinearSVC >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]}, ... 
scoring=ftwo_scorer) """ sign = 1 if greater_is_better else -1 if needs_proba and needs_threshold: raise ValueError("Set either needs_proba or needs_threshold to True," " but not both.") if needs_proba: cls = _ProbaScorer elif needs_threshold: cls = _ThresholdScorer else: cls = _PredictScorer return cls(score_func, sign, kwargs) # Standard regression scores r2_scorer = make_scorer(r2_score) mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False) mean_absolute_error_scorer = make_scorer(mean_absolute_error, greater_is_better=False) median_absolute_error_scorer = make_scorer(median_absolute_error, greater_is_better=False) # Standard Classification Scores accuracy_scorer = make_scorer(accuracy_score) f1_scorer = make_scorer(f1_score) # Score functions that need decision values roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_threshold=True) average_precision_scorer = make_scorer(average_precision_score, needs_threshold=True) precision_scorer = make_scorer(precision_score) recall_scorer = make_scorer(recall_score) # Score function for probabilistic classification log_loss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True) # Clustering scores adjusted_rand_scorer = make_scorer(adjusted_rand_score) SCORERS = dict(r2=r2_scorer, median_absolute_error=median_absolute_error_scorer, mean_absolute_error=mean_absolute_error_scorer, mean_squared_error=mean_squared_error_scorer, accuracy=accuracy_scorer, roc_auc=roc_auc_scorer, average_precision=average_precision_scorer, log_loss=log_loss_scorer, adjusted_rand_score=adjusted_rand_scorer) for name, metric in [('precision', precision_score), ('recall', recall_score), ('f1', f1_score)]: SCORERS[name] = make_scorer(metric) for average in ['macro', 'micro', 'samples', 'weighted']: qualified_name = '{0}_{1}'.format(name, average) SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
bsd-3-clause
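The docstrings in the scorer module above describe how a scorer wraps a metric together with a sign so that "greater is better" always holds for model selection. The sketch below illustrates that sign-flip for a loss-type metric; it only relies on make_scorer and cross_val_score as documented above, and the Ridge/diabetes choice is just for illustration.

from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.model_selection import cross_val_score

X, y = load_diabetes(return_X_y=True)

# greater_is_better=False makes the scorer return the negated loss,
# so that higher scores are still "better" during model selection
neg_mse = make_scorer(mean_squared_error, greater_is_better=False)

scores = cross_val_score(Ridge(alpha=1.0), X, y, scoring=neg_mse, cv=5)
print("negated MSE per fold:", scores)   # all values are <= 0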
ray-project/ray
python/ray/train/sklearn/sklearn_checkpoint.py
1
2557
import os
from typing import TYPE_CHECKING, Optional

from sklearn.base import BaseEstimator

from ray.air._internal.checkpointing import save_preprocessor_to_dir
from ray.air.checkpoint import Checkpoint
from ray.air.constants import MODEL_KEY
import ray.cloudpickle as cpickle
from ray.util.annotations import PublicAPI

if TYPE_CHECKING:
    from ray.data.preprocessor import Preprocessor


@PublicAPI(stability="alpha")
class SklearnCheckpoint(Checkpoint):
    """A :py:class:`~ray.air.checkpoint.Checkpoint` with sklearn-specific
    functionality.

    Create this from a generic :py:class:`~ray.air.checkpoint.Checkpoint` by
    calling ``SklearnCheckpoint.from_checkpoint(ckpt)``
    """

    @classmethod
    def from_estimator(
        cls,
        estimator: BaseEstimator,
        *,
        path: os.PathLike,
        preprocessor: Optional["Preprocessor"] = None,
    ) -> "SklearnCheckpoint":
        """Create a :py:class:`~ray.air.checkpoint.Checkpoint` that stores an
        sklearn ``Estimator``.

        Args:
            estimator: The ``Estimator`` to store in the checkpoint.
            path: The directory where the checkpoint will be stored.
            preprocessor: A fitted preprocessor to be applied before inference.

        Returns:
            An :py:class:`SklearnCheckpoint` containing the specified
            ``Estimator``.

        Examples:
            >>> from ray.train.sklearn import SklearnCheckpoint
            >>> from sklearn.ensemble import RandomForestClassifier
            >>>
            >>> estimator = RandomForestClassifier()
            >>> checkpoint = SklearnCheckpoint.from_estimator(estimator, path=".")

            You can use a :py:class:`SklearnCheckpoint` to create an
            :py:class:`~ray.train.sklearn.SklearnPredictor` and perform
            inference.

            >>> from ray.train.sklearn import SklearnPredictor
            >>>
            >>> predictor = SklearnPredictor.from_checkpoint(checkpoint)
        """
        with open(os.path.join(path, MODEL_KEY), "wb") as f:
            cpickle.dump(estimator, f)

        if preprocessor:
            save_preprocessor_to_dir(preprocessor, path)

        checkpoint = cls.from_directory(path)

        return checkpoint

    def get_estimator(self) -> BaseEstimator:
        """Retrieve the ``Estimator`` stored in this checkpoint."""
        with self.as_directory() as checkpoint_path:
            estimator_path = os.path.join(checkpoint_path, MODEL_KEY)
            with open(estimator_path, "rb") as f:
                return cpickle.load(f)
apache-2.0
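A minimal round-trip with the checkpoint class above might look like the sketch below. It assumes a Ray installation that ships this exact SklearnCheckpoint API (the Ray AIR checkpoint interfaces have changed between releases), and the temporary-directory handling is purely illustrative.

import tempfile

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from ray.train.sklearn import SklearnCheckpoint

X, y = load_iris(return_X_y=True)
estimator = RandomForestClassifier(n_estimators=10).fit(X, y)

with tempfile.TemporaryDirectory() as tmpdir:
    # persist the fitted estimator into a checkpoint directory ...
    checkpoint = SklearnCheckpoint.from_estimator(estimator, path=tmpdir)

    # ... and restore it again from that checkpoint
    restored = checkpoint.get_estimator()
    print(type(restored).__name__, restored.score(X, y))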
MarcSpitz/ldebroux_kjadin_masters-thesis_2014
src/comparison.py
1
7412
#!/usr/bin/env python # -*- coding: utf-8 -*- # @author: Debroux Léonard <leonard.debroux@gmail.com> # @author: Jadin Kevin <contact@kjadin.com> import sys, os from utils import Utils from setup import Setup import logging as log import csv from pylab import savefig, figure, hist, xlim, subplots, tight_layout import numpy from abstracttest import AbstractTest class ComparisonTest(AbstractTest): """ Comparison test Compares several algorithm configurations on several datasets Outputs a comparison table csv and a plot of the number of rounds of local search for each configuration Comparison table format: \|__alg0_|__alg1_|_..._|__algn_| dataset0_|_val00_|_val01_|_..._|_val0n_| ... _|_ ... | datasetm_|_valm0_|_valm1_|_..._|_valmn_| valij are percentages initially, the values on the lines are the mean of all the executions for dataset i and alg j lower value is expressed as 100% and all the others are computed based on this reference value """ def positiveRealRoot(self, n, r): roots = numpy.roots([1]+[0]*(r-1)+[-n]) for r in roots: if r.imag == 0.0 and r.real >= 0: # return the first and only root that is real and positive return r.real def geometricMean(self, l): product = reduce(lambda x, y: x*y, l) return self.positiveRealRoot(product, len(l)) def addListToDict(self, dictionary, index, l): if index in dictionary: dictionary[index].extend(l) else: dictionary[index] = l def run(self): timestr = Utils.getTimeString() log.info("test started at: %s" % timestr) k = 1 custom_setupDicts = self.setupDicts dataSets = self.dataSets NGdict = self.NGdict root = self.root tests = self.tests config_file = self.config_file shortest_paths_file = self.shortest_paths_file_name refColumn = self.refColumn prependstr = "_".join([config_file, shortest_paths_file]) filename = "%s_comparison.txt" % (prependstr) filename = os.path.join(self.working_directory, filename) log.info("writing to file %s" % filename) f = open(filename,'w') writeNewline = self.writeNewline # shortcut writeNewline(filename, f) dataSetCostList = [] # array improveTriesPerSetup = {} maxImprove = 0 minImprove = sys.maxint for dataSet in dataSets: dataSetIndex = dataSets.index(dataSet) setupCostList = [] # line in the array, costs for one dataset and all setups for s in custom_setupDicts: setupIndex = custom_setupDicts.index(s) self.log_progression(dataSetIndex+1, len(dataSets), setupIndex+1, len(custom_setupDicts), self.testname) Setup.reset_setup() # start from default Setup.configure(s) events = self.addImproveToDataSet(dataSet) # improves will be added to dataSet while keeping dataSet intact if log.getLogger(__name__).isEnabledFor(log.INFO): # logs only printed if asked for self.print_logs() # indexCostList those are the costs that will contribute to one cell indexCostList = [] # cell in the array, costs for one dataset, for one setup. 
for idx in range(tests): log.debug('test %s' % idx) _, tickCosts, improveTry, _, _, _ = Utils.run_setup(NGdict[k], root, events) self.writeDataHeader(f, dataSetIndex, setupIndex, idx) writeNewline('tickcosts: %s' % tickCosts, f) writeNewline('improvetry: %s' % improveTry, f) self.addListToDict(improveTriesPerSetup, setupIndex, improveTry) if improveTry and min(improveTry) < minImprove: minImprove = min(improveTry) if improveTry and max(improveTry) > maxImprove: maxImprove = max(improveTry) avgCost = numpy.mean(tickCosts) indexCostList.append(avgCost) m = numpy.mean(indexCostList) s = numpy.std(indexCostList) log.debug('mean: %s, std: %s' % (m, s)) setupCostList.append(m) # this is the content of one cell # setupCostList contains means of the $tests$ avgCost log.debug("setupCostList: %s" % setupCostList) # format setupCostList with percentages if refColumn > -1 and refColumn < len(setupCostList): minSetupCost = setupCostList[refColumn] else: minSetupCost = float(min(setupCostList)) for i in range(len(setupCostList)): percentage = setupCostList[i]/minSetupCost # *100 to have 100% = 100 and not 100% = 1 setupCostList[i] = percentage # setupCostList contains percentages dataSetCostList.append(setupCostList) f.close() setupResults = [] for i in range(len(custom_setupDicts)): # go through the columns of the array column = [] for j in range(len(dataSets)): # go through the lines column.append(dataSetCostList[j][i]) geoMean = self.geometricMean(column) setupResults.append(geoMean) log.info("dataSetCostList") for line in dataSetCostList: log.info(line) log.info('Geometric Mean: %s' % setupResults) # output a csv to be used later on prependstr = "_".join([config_file, shortest_paths_file]) filename = "%s_comparison.csv" % (prependstr) filename = os.path.join(self.working_directory, filename) log.info("writing to file %s" % filename) setupsHeader = ["setup%s" % i for i in range(len(custom_setupDicts))] header = setupsHeader delimiter_str = ',' ofile = open(filename, "wb") writer = csv.writer(ofile, delimiter=delimiter_str, # quotechar='"', # quoting=csv.QUOTE_ALL ) writer.writerow(header) lines = dataSetCostList significant_figures = 3 i = 1 for row in lines: rowRounded = [round(v, significant_figures) for v in row] towrite = rowRounded i = i+1 writer.writerow(towrite) geoMeansRounded = [round(v, significant_figures) for v in setupResults] writer.writerow(geoMeansRounded) def exportHist(l, step, filename): c = self.colors fig, ax1 = subplots() r = (minImprove, maxImprove) ax1.hist(l, step, range=r, normed=False, histtype='bar', facecolor=c['blue']) ax1.set_xlim(r) # set_xlim only for this plot ax1.set_xlabel('Rounds') ax1.set_ylabel('Nb values') tight_layout() savefig('%s_0.eps' % filename) fig, ax1 = subplots() ax1.hist(l, step/2.0, normed=False, histtype='bar', facecolor=c['blue']) ax1.set_xlabel('Rounds') ax1.set_ylabel('Nb values') tight_layout() savefig('%s_1.eps' % filename) for setup in improveTriesPerSetup.keys(): l = improveTriesPerSetup[setup] if l: prependstr = "_".join([config_file, shortest_paths_file]) filename = "%s_comparison_setup_%s" % (prependstr, setup) filename = os.path.join(self.working_directory, filename) exportHist(l, 30, filename) def main(argv): comparison = ComparisonTest() comparison.run() if __name__ == "__main__": main(sys.argv)
gpl-2.0
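The geometricMean helper in the comparison script above takes the n-th root of the product of its inputs by finding the positive real root of x**n - product, which can overflow for long lists of values. An equivalent and numerically safer formulation works in the log domain, as sketched below (purely illustrative, not part of the original script).

import numpy as np

values = [1.00, 1.07, 0.98, 1.12, 1.03]   # e.g. cost ratios per dataset

# geometric mean as the n-th root of the product ...
gm_product = np.prod(values) ** (1.0 / len(values))

# ... and the log-domain version, which avoids overflow for long lists
gm_log = np.exp(np.mean(np.log(values)))

print(round(gm_product, 6), round(gm_log, 6))   # both give the same value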
ray-project/ray
python/ray/air/tests/test_dataset_config.py
1
11306
import random from typing import Optional import pytest import ray from ray.air import session from ray.air.config import DatasetConfig, ScalingConfig from ray.data import Dataset, DatasetPipeline from ray.data.preprocessors import BatchMapper from ray.train.data_parallel_trainer import DataParallelTrainer @pytest.fixture def ray_start_4_cpus(): address_info = ray.init(num_cpus=4) yield address_info ray.shutdown() class TestBasic(DataParallelTrainer): _dataset_config = { "train": DatasetConfig(split=True, required=True), "test": DatasetConfig(), "baz": DatasetConfig(split=True), } def __init__( self, num_workers: int, expect_ds: bool, expect_sizes: Optional[dict], **kwargs ): def train_loop_per_worker(): data_shard = session.get_dataset_shard("train") if expect_ds: assert isinstance(data_shard, Dataset), data_shard else: assert isinstance(data_shard, DatasetPipeline), data_shard for k, v in expect_sizes.items(): shard = session.get_dataset_shard(k) if v == -1: assert shard is None, shard else: if isinstance(shard, DatasetPipeline): assert next(shard.iter_epochs()).count() == v, shard else: assert shard.count() == v, shard kwargs.pop("scaling_config", None) super().__init__( train_loop_per_worker=train_loop_per_worker, scaling_config=ScalingConfig(num_workers=num_workers), **kwargs, ) class TestWildcard(TestBasic): _dataset_config = { "train": DatasetConfig(split=True, required=True), "*": DatasetConfig(split=True), } def test_basic(ray_start_4_cpus): ds = ray.data.range_table(10) # Single worker basic case. test = TestBasic( 1, True, {"train": 10, "test": 10}, dataset_config={}, datasets={"train": ds, "test": ds}, ) test.fit() # Single worker, no test ds. test = TestBasic( 1, True, {"train": 10, "test": -1}, dataset_config={}, datasets={"train": ds} ) test.fit() # Two workers, train split. test = TestBasic( 2, True, {"train": 5, "test": 10}, datasets={"train": ds, "test": ds} ) test.fit() # Two workers, wild split. test = TestWildcard( 2, True, {"train": 5, "wild": 5}, datasets={"train": ds, "wild": ds} ) test.fit() # Two workers, both split. test = TestBasic( 2, True, {"train": 5, "test": 5}, dataset_config={"test": DatasetConfig(split=True)}, datasets={"train": ds, "test": ds}, ) # Test get config. assert test.get_dataset_config()["train"].split assert test.get_dataset_config()["test"].split test.fit() def test_error(ray_start_4_cpus): ds = ray.data.range_table(10) # Missing required dataset. with pytest.raises(ValueError): TestBasic( 1, True, {"train": 10, "test": 10}, dataset_config={}, datasets={"test": ds} ) # Missing optional dataset is OK. test = TestBasic( 1, True, {"train": 10}, dataset_config={}, datasets={"train": ds}, preprocessor=BatchMapper(lambda x: x, batch_format="pandas"), ) test.fit() # Extra dataset. with pytest.raises(ValueError): TestBasic( 1, True, {"train": 10, "test": 10}, dataset_config={}, datasets={"train": ds, "blah": ds}, ) def test_use_stream_api_config(ray_start_4_cpus): ds = ray.data.range_table(10) # Single worker basic case. test = TestBasic( 1, False, {"train": 10, "test": 10}, dataset_config={"train": DatasetConfig(use_stream_api=True)}, datasets={"train": ds, "test": ds}, ) test.fit() # Two worker split pipeline. 
test = TestBasic( 2, False, {"train": 5, "test": 10}, dataset_config={"train": DatasetConfig(use_stream_api=True)}, datasets={"train": ds, "test": ds}, ) test.fit() def test_fit_transform_config(ray_start_4_cpus): ds = ray.data.range_table(10) def drop_odd_pandas(rows): key = list(rows)[0] return rows[(rows[key] % 2 == 0)] def drop_odd_numpy(rows): return [x for x in rows if x % 2 == 0] prep_pandas = BatchMapper(drop_odd_pandas, batch_format="pandas") prep_numpy = BatchMapper(drop_odd_numpy, batch_format="numpy") # Single worker basic case. test = TestBasic( 1, True, {"train": 5, "test": 5}, dataset_config={}, datasets={"train": ds, "test": ds}, preprocessor=prep_pandas, ) test.fit() # Single worker basic case. test = TestBasic( 1, True, {"train": 5, "test": 5}, dataset_config={}, datasets={"train": ds, "test": ds}, preprocessor=prep_numpy, ) test.fit() # No transform for test. test = TestBasic( 1, True, {"train": 5, "test": 10}, dataset_config={"test": DatasetConfig(transform=False)}, datasets={"train": ds, "test": ds}, preprocessor=prep_pandas, ) test.fit() class TestStream(DataParallelTrainer): _dataset_config = { "train": DatasetConfig(split=True, required=True, use_stream_api=True), } def __init__(self, check_results_fn, **kwargs): def train_loop_per_worker(): data_shard = session.get_dataset_shard("train") assert isinstance(data_shard, DatasetPipeline), data_shard results = [] for epoch in data_shard.iter_epochs(2): results.append(epoch.take()) check_results_fn(data_shard, results) kwargs.pop("scaling_config", None) super().__init__( train_loop_per_worker=train_loop_per_worker, scaling_config=ScalingConfig(num_workers=1), **kwargs, ) class TestBatch(DataParallelTrainer): _dataset_config = { "train": DatasetConfig(split=True, required=True, use_stream_api=False), } def __init__(self, check_results_fn, **kwargs): def train_loop_per_worker(): data_shard = session.get_dataset_shard("train") assert isinstance(data_shard, Dataset), data_shard results = data_shard.take() check_results_fn(data_shard, results) kwargs.pop("scaling_config", None) super().__init__( train_loop_per_worker=train_loop_per_worker, scaling_config=ScalingConfig(num_workers=1), **kwargs, ) def test_stream_inf_window_cache_prep(ray_start_4_cpus): def checker(shard, results): results = [sorted(r) for r in results] assert len(results[0]) == 5, results assert results[0] == results[1], results stats = shard.stats() assert str(shard) == "DatasetPipeline(num_windows=inf, num_stages=1)", shard assert "Stage 1 read->map_batches: 1/1 blocks executed " in stats, stats def rand(x): return [random.random() for _ in range(len(x))] prep = BatchMapper(rand, batch_format="pandas") ds = ray.data.range_table(5, parallelism=1) test = TestStream( checker, preprocessor=prep, datasets={"train": ds}, dataset_config={"train": DatasetConfig(stream_window_size=-1)}, ) test.fit() def test_stream_finite_window_nocache_prep(ray_start_4_cpus): def rand(x): return [random.random() for _ in range(len(x))] prep = BatchMapper(rand, batch_format="pandas") ds = ray.data.range_table(5) # Test the default 1GiB window size. 
def checker(shard, results): results = [sorted(r) for r in results] assert int(results[0][0]) != results[0][0] assert len(results[0]) == 5, results assert results[0] != results[1], results stats = shard.stats() assert str(shard) == "DatasetPipeline(num_windows=inf, num_stages=1)", shard assert ( "Stage 1 read->randomize_block_order->map_batches: 1/1 blocks executed " in stats ), stats test = TestStream( checker, preprocessor=prep, datasets={"train": ds}, dataset_config={"train": DatasetConfig()}, ) test.fit() # Test a smaller window size. def checker(shard, results): results = [sorted(r) for r in results] assert int(results[0][0]) != results[0][0] assert len(results[0]) == 5, results assert results[0] != results[1], results stats = shard.stats() assert str(shard) == "DatasetPipeline(num_windows=inf, num_stages=1)", shard assert ( "Stage 1 read->randomize_block_order->map_batches: 1/1 blocks executed " in stats ), stats test = TestStream( checker, preprocessor=prep, datasets={"train": ds}, dataset_config={"train": DatasetConfig(stream_window_size=10)}, ) test.fit() def test_global_shuffle(ray_start_4_cpus): def checker(shard, results): assert len(results[0]) == 5, results assert results[0] != results[1], results stats = shard.stats() assert str(shard) == "DatasetPipeline(num_windows=inf, num_stages=1)", shard assert "Stage 1 read->randomize_block_order->random_shuffle" in stats, stats ds = ray.data.range_table(5) test = TestStream( checker, datasets={"train": ds}, dataset_config={"train": DatasetConfig(global_shuffle=True)}, ) test.fit() def checker(shard, results): assert len(results) == 5, results stats = shard.stats() assert "Stage 1 read->random_shuffle" in stats, stats ds = ray.data.range_table(5) test = TestBatch( checker, datasets={"train": ds}, dataset_config={"train": DatasetConfig(global_shuffle=True)}, ) test.fit() def test_randomize_block_order(ray_start_4_cpus): def checker(shard, results): stats = shard.stats() assert "randomize_block_order: 5/5 blocks executed in 0s" in stats, stats ds = ray.data.range_table(5) test = TestStream( checker, datasets={"train": ds}, ) test.fit() def checker(shard, results): stats = shard.stats() assert "randomize_block_order" not in stats, stats ds = ray.data.range_table(5) test = TestStream( checker, datasets={"train": ds}, dataset_config={"train": DatasetConfig(randomize_block_order=False)}, ) test.fit() def checker(shard, results): assert len(results) == 5, results stats = shard.stats() assert "randomize_block_order: 5/5 blocks executed" in stats, stats ds = ray.data.range_table(5) test = TestBatch( checker, datasets={"train": ds}, ) test.fit() if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", "-x", __file__]))
apache-2.0
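The ray_start_4_cpus fixture in the test module above is the standard pytest pattern for tests that need a local Ray cluster: initialize before the test, yield, and shut down afterwards. A stripped-down version of that pattern, usable in any Ray test module, is sketched below; the fixture and test names are illustrative.

import pytest
import ray


@pytest.fixture
def ray_local_cluster():
    # start a small local cluster for the duration of one test
    address_info = ray.init(num_cpus=2)
    yield address_info
    # always tear the cluster down, even if the test failed
    ray.shutdown()


def test_simple_task(ray_local_cluster):
    @ray.remote
    def add(a, b):
        return a + b

    assert ray.get(add.remote(1, 2)) == 3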
giorgiop/scikit-learn
examples/model_selection/plot_nested_cross_validation_iris.py
11
4259
""" ========================================= Nested versus non-nested cross-validation ========================================= This example compares non-nested and nested cross-validation strategies on a classifier of the iris data set. Nested cross-validation (CV) is often used to train a model in which hyperparameters also need to be optimized. Nested CV estimates the generalization error of the underlying model and its (hyper)parameter search. Choosing the parameters that maximize non-nested CV biases the model to the dataset, yielding an overly-optimistic score. Model selection without nested CV uses the same data to tune model parameters and evaluate model performance. Information may thus "leak" into the model and overfit the data. The magnitude of this effect is primarily dependent on the size of the dataset and the stability of the model. See Cawley and Talbot [1]_ for an analysis of these issues. To avoid this problem, nested CV effectively uses a series of train/validation/test set splits. In the inner loop, the score is approximately maximized by fitting a model to each training set, and then directly maximized in selecting (hyper)parameters over the validation set. In the outer loop, generalization error is estimated by averaging test set scores over several dataset splits. The example below uses a support vector classifier with a non-linear kernel to build a model with optimized hyperparameters by grid search. We compare the performance of non-nested and nested CV strategies by taking the difference between their scores. .. topic:: See Also: - :ref:`cross_validation` - :ref:`grid_search` .. topic:: References: .. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and subsequent selection bias in performance evaluation. J. Mach. Learn. Res 2010,11, 2079-2107. <http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_ """ from sklearn.datasets import load_iris from matplotlib import pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, KFold import numpy as np print(__doc__) # Number of random trials NUM_TRIALS = 30 # Load the dataset iris = load_iris() X_iris = iris.data y_iris = iris.target # Set up possible values of parameters to optimize over p_grid = {"C": [1, 10, 100], "gamma": [.01, .1]} # We will use a Support Vector Classifier with "rbf" kernel svr = SVC(kernel="rbf") # Arrays to store scores non_nested_scores = np.zeros(NUM_TRIALS) nested_scores = np.zeros(NUM_TRIALS) # Loop for each trial for i in range(NUM_TRIALS): # Choose cross-validation techniques for the inner and outer loops, # independently of the dataset. # E.g "LabelKFold", "LeaveOneOut", "LeaveOneLabelOut", etc. inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = GridSearchCV(estimator=svr, param_grid=p_grid, cv=inner_cv) clf.fit(X_iris, y_iris) non_nested_scores[i] = clf.best_score_ # Nested CV with parameter optimization nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv) nested_scores[i] = nested_score.mean() score_difference = non_nested_scores - nested_scores print("Average difference of {0:6f} with std. dev. of {1:6f}." 
.format(score_difference.mean(), score_difference.std())) # Plot scores on each trial for nested and non-nested CV plt.figure() plt.subplot(211) non_nested_scores_line, = plt.plot(non_nested_scores, color='r') nested_line, = plt.plot(nested_scores, color='b') plt.ylabel("score", fontsize="14") plt.legend([non_nested_scores_line, nested_line], ["Non-Nested CV", "Nested CV"], bbox_to_anchor=(0, .4, .5, 0)) plt.title("Non-Nested and Nested Cross Validation on Iris Dataset", x=.5, y=1.1, fontsize="15") # Plot bar chart of the difference. plt.subplot(212) difference_plot = plt.bar(range(NUM_TRIALS), score_difference) plt.xlabel("Individual Trial #") plt.legend([difference_plot], ["Non-Nested CV - Nested CV Score"], bbox_to_anchor=(0, 1, .8, 0)) plt.ylabel("score difference", fontsize="14") plt.show()
bsd-3-clause
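The essential pattern of the nested cross-validation example above is an inner GridSearchCV wrapped by an outer cross_val_score; the sketch below condenses it to that core, with a single trial instead of the 30 repetitions used in the example.

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
param_grid = {"C": [1, 10, 100], "gamma": [.01, .1]}

inner_cv = KFold(n_splits=4, shuffle=True, random_state=0)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=0)

# the inner loop selects hyperparameters ...
clf = GridSearchCV(SVC(kernel="rbf"), param_grid=param_grid, cv=inner_cv)

# ... while the outer loop estimates the generalization error of the
# whole "fit + hyperparameter search" procedure
nested_scores = cross_val_score(clf, X, y, cv=outer_cv)
print("nested CV accuracy: %.3f +/- %.3f"
      % (nested_scores.mean(), nested_scores.std()))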
TUW-GEO/rt1
rt1/rtplots.py
1
109404
""" Class for quick visualization of results and used phasefunctions """ from functools import partial from itertools import cycle import datetime import numpy as np import sympy as sp import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.lines as mlines from matplotlib.colors import LinearSegmentedColormap, Normalize from matplotlib.widgets import Slider, CheckButtons from matplotlib.gridspec import GridSpec from matplotlib.gridspec import GridSpecFromSubplotSpec import matplotlib.ticker as ticker try: from mpl_toolkits.mplot3d import Axes3D except ImportError: pass from .general_functions import ( rectangularize, dBsig0convert, pairwise, ) pd.plotting.register_matplotlib_converters() def polarplot( X=None, inc=[15.0, 35.0, 55.0, 75.0], multip=2.0, label=None, aprox=True, legend=True, legpos=(0.75, 0.5), groundcolor="none", param_dict=[{}], polarax=None, ): """ Generation of polar-plots of the volume- and the surface scattering phase function as well as the used approximations in terms of legendre-polynomials. Parameters ----------- SRF : RT1.surface class object Alternative direct specification of the surface BRDF, e.g. SRF = CosineLobe(i=3, ncoefs=5) V : RT1.volume class object Alternative direct specification of the volume-scattering phase-function e.g. V = Rayleigh() Other Parameters ----------------- inc : list of floats (default = [15.,35.,55.,75.]) Incidence-angles in degree at which the volume-scattering phase-function will be plotted multip : float (default = 2.) Multiplicator to scale the plotrange for the plot of the volume-scattering phase-function (the max-plotrange is given by the max. value of V in forward-direction (for the chosen incp) ) label : string Manual label for the volume-scattering phase-function plot aprox : boolean (default = True) Indicator if the approximation of the phase-function in terms of Legendre-polynomials will be plotted. legend : boolean (default = True) Indicator if a legend should be shown that indicates the meaning of the different colors for the phase-function legpos : (float,float) (default = (0.75,0.5)) Positioning of the legend for the V-plot (controlled via the matplotlib.legend keyword bbox_to_anchor = plegpos ) groundcolor : string (default = "none") Matplotlib color-indicator to change the color of the lower hemisphere in the BRDF-plot possible values are: ('r', 'g' , 'b' , 'c' , 'm' , 'y' , 'k' , 'w' , 'none') polarax: matplotlib.axes the axes to use... it must be a polar-axes, e.g.: >>> polarax = fig.add_subplot(111, projection='polar') param_dict : dict (or list of dicts) a dictionary containing the names and values of the symbolic parameters required to fully specify the V/SRF functions. if a list of dicts is provided, the specifications are plotted on top of each other. Returns --------- polarfig : figure a matplotlib figure showing a polar-plot of the functions specified by V or SRF """ assert isinstance(inc, list), ( "Error: incidence-angles for " + "polarplot must be a list" ) assert isinstance(multip, float), ( "Error: plotrange-multiplier " + "for polarplot must be a floating-point number" ) if X is None: assert False, "Error: You must provide a volume- or surface object!" if isinstance(param_dict, dict): param_dict = [param_dict] if polarax is None: fig = plt.figure(figsize=(7, 7)) polarax = fig.add_subplot(111, projection="polar") else: assert polarax.name == "polar", "you must provide a polar-axes!" if ".surface." 
in str(X.__class__): if label is None: label = "Surface-Scattering Phase Function" funcname = "brdf" angs = ["theta_ex", "theta_s", "phi_ex", "phi_s"] def angsub(ti): return ti thetass = np.arange(-np.pi / 2.0, np.pi / 2.0, 0.01) polarax.fill( np.arange(np.pi / 2.0, 3.0 * np.pi / 2.0, 0.01), np.ones_like(np.arange(np.pi / 2.0, 3.0 * np.pi / 2.0, 0.01)) * 1 * 1.2, color=groundcolor, ) if ".volume." in str(X.__class__): if label is None: label = "Volume-Scattering Phase Function" funcname = "p" angs = ["theta_0", "theta_s", "phi_0", "phi_s"] def angsub(ti): return np.pi - ti thetass = np.arange(0.0, 2.0 * np.pi, 0.01) # plot of volume-scattering phase-function's pmax = 0 for n_X, X in enumerate(np.atleast_1d(X)): for n_p, params in enumerate(param_dict): # define a plotfunction of the legendre-approximation of p if aprox is True: phasefunktapprox = sp.lambdify( (*angs, *params.keys()), X.legexpansion(*angs, geometry="vvvv").doit(), modules=["numpy", "sympy"], ) # set incidence-angles for which p is calculated plottis = np.deg2rad(inc) colors = cycle(["k", "r", "g", "b", "c", "m", "y"]) used_colors = [] for i in plottis: ts = np.arange(0.0, 2.0 * np.pi, 0.01) pmax_i = multip * np.max( getattr(X, funcname)( np.full_like(ts, i), ts, 0.0, 0.0, param_dict=params, ) ) if pmax_i > pmax: pmax = pmax_i if legend is True: legend_lines = [] # set color-counter to 0 for ti in plottis: color = next(colors) used_colors.append(color) rad = getattr(X, funcname)( ti, thetass, 0.0, 0.0, param_dict=params ) if aprox is True: # the use of np.pi-ti stems from the definition # of legexpansion() in volume.py radapprox = phasefunktapprox( angsub(ti), thetass, 0.0, 0.0, **params ) # set theta direction to clockwise polarax.set_theta_direction(-1) # set theta to start at z-axis polarax.set_theta_offset(np.pi / 2.0) polarax.plot(thetass, rad, color) if aprox is True: polarax.plot(thetass, radapprox, color + "--") polarax.arrow( -ti, pmax * 1.2, 0.0, -pmax * 0.8, head_width=0.0, head_length=0.0, fc=color, ec=color, lw=1, alpha=0.3, ) polarax.fill_between(thetass, rad, alpha=0.2, color=color) polarax.set_xticks(np.deg2rad([0, 45, 90, 125, 180])) polarax.set_xticklabels( [ r"$0^\circ$", r"$45^\circ$", r"$90^\circ$", r"$135^\circ$", r"$180^\circ$", ] ) polarax.set_yticklabels([]) polarax.set_rmax(pmax * 1.2) polarax.set_title(label + "\n") polarax.set_rmin(0.0) # add legend for covering layer phase-functions used_colors = iter(used_colors) if legend is True: for ti in plottis: color = next(used_colors) legend_lines += [ mlines.Line2D( [], [], color=color, label=r"$\theta_0$ = " + str(np.round_(np.rad2deg(ti), decimals=1)) + r"${}^\circ$", ) ] i = i + 1 if aprox is True: legend_lines += [ mlines.Line2D([], [], color="k", linestyle="--", label="approx.") ] legend = polarax.legend(bbox_to_anchor=legpos, loc=2, handles=legend_lines) legend.get_frame().set_facecolor("w") legend.get_frame().set_alpha(0.5) return fig def hemreflect( R=None, SRF=None, phi_0=0.0, t_0_step=5.0, t_0_min=0.0, t_0_max=90.0, simps_N=1000, showpoints=True, returnarray=False, param_dict={}, ): """ Numerical evaluation of the hemispherical reflectance of the given BRDF-function using scipy's implementation of the Simpson-rule integration scheme. 
Parameters ------------ R : RT1-class object definition of the brdf-function to be evaluated (either R or SRF must be provided) The BRDf is defined via: BRDF = R.SRF.NormBRDF * R.SRF.brdf() SRF : Surface-class object definition of the brdf-function to be evaluated (either R or SRF must be provided) The BRDf is defined via: BRDF = SRF.NormBRDF * SRF.brdf() Other Parameters ----------------- phi_0 : float incident azimuth-angle (for spherically symmetric phase-functions the result is independent of the choice of phi_0) t_0_step : float separation of the incidence-angle grid-spacing in DEGREE for which the hemispherical reflectance will be calculated t_0_min : float minimum incidence-angle t_0_max : float maximum incidence-angle simps_N : integer number of points used in the discretization of the brdf within the Simpson-rule showpoints : boolean show or hide integration-points in the plot param_dict : dict a dictionary containing the names and values of the symbolic parameters required to define the SRF function Returns -------- fig : figure a matplotlib figure showing the incidence-angle dependent hemispherical reflectance """ from scipy.integrate import simps # choose BRDF function to be evaluated if R is not None: BRDF = R.SRF.brdf try: Nsymb = R.SRF.NormBRDF.free_symbols Nfunc = sp.lambdify(Nsymb, R.SRF.NormBRDF, modules=["numpy"]) NormBRDF = Nfunc(*[param_dict[str(i)] for i in Nsymb]) except Exception: NormBRDF = R.SRF.NormBRDF elif SRF is not None: BRDF = SRF.brdf try: Nsymb = SRF.NormBRDF[0].free_symbols Nfunc = sp.lambdify(Nsymb, SRF.NormBRDF, modules=["numpy"]) NormBRDF = Nfunc(*[param_dict[str(i)] for i in Nsymb]) except Exception: NormBRDF = SRF.NormBRDF else: assert False, "Error: You must provide either R or SRF" # set incident (zenith-angle) directions for which the integral # should be evaluated! incnum = np.arange(t_0_min, t_0_max, t_0_step) # define grid for integration x = np.linspace(0.0, np.pi / 2.0, simps_N) y = np.linspace(0.0, 2 * np.pi, simps_N) # initialize array for solutions sol = [] # ---- evaluation of Integral # adapted from # (http://stackoverflow.com/questions/20668689/integrating-2d-samples-on-a-rectangular-grid-using-scipy) for theta_0 in np.deg2rad(incnum): # define the function that has to be integrated # (i.e. Eq.20 in the paper) # notice the additional np.sin(thetas) which oritinates from # integrating over theta_s instead of mu_s def integfunkt(theta_s, phi_s): return ( np.sin(theta_s) * np.cos(theta_s) * BRDF(theta_0, theta_s, phi_0, phi_s, param_dict=param_dict) ) # evaluate the integral using Simpson's Rule twice z = integfunkt(x[:, None], y) sol = sol + [simps(simps(z, y), x)] sol = np.array(sol) * NormBRDF # print warning if the hemispherical reflectance exceeds 1 if np.any(sol > 1.0): print("ATTENTION, Hemispherical Reflectance > 1 !") if returnarray is True: return sol else: # generation of plot fig = plt.figure() axnum = fig.add_subplot(1, 1, 1) if len(sol.shape) > 1: for i, sol in enumerate(sol): axnum.plot(incnum, sol, label="NormBRDF = " + str(NormBRDF[i])) if showpoints is True: axnum.plot(incnum, sol, "r.") else: axnum.plot(incnum, sol, "k", label="NormBRDF = " + str(NormBRDF)) if showpoints is True: axnum.plot(incnum, sol, "r.") axnum.set_xlabel("$\\theta_0$ [deg]") axnum.set_ylabel("$R(\\theta_0)$") axnum.set_title("Hemispherical reflectance ") axnum.set_ylim(0.0, np.max(sol) * 1.1) axnum.legend() axnum.grid() return fig class plot: """ Generation of plots to visualize rtfits results - scatter generate a scatterplot of measured vs. 
modelled data - fit_timeseries generate a plot showing the temporal and incidence-angle dependency of measured vs. modelled data. - fit_errors generate a plot showing the temporal and incidence-angle dependency of the residuals of measured vs. modelled data. - results generate a plot showing the fitted curves and the obtained parameter timeseries - single_results plot the data and the fitted curves for individual measurement-groups as defined by the frequencies of the fitted parameters - intermediate_results only available if *performfit* has been called with *intermediate_results = True* ! generate a plot showing the development of the fitted parameters and the residuals for each fit-iteration - printsig0analysis a widget to analyze the fit-results of individual timestamps within the considered timeseries """ def __init__(self, fit=None, **kwargs): self.fit = fit def scatter(self, fit=None, mima=None, pointsize=0.5, regression=True, **kwargs): """ geerate a scatterplot of modelled vs. original backscatter data Parameters ----------- fit : list output of performfit()-function Other Parameters ------------------ mima : list manual definition plot-boundaries via mima = [min, max] pointsize : float manual specification of pointsize regression : bool (default = True) indicator if the scipy.stats.linregress should be called to get the regression-line and the r^2 value kwargs : - kwargs passed to matplotlib.pyplot.scatter() Returns ------- fig : matplotlib.figure the used matplotlib figure instance """ plot("asdf") if fit is None: fit = self.fit fig = plt.figure() ax = fig.add_subplot(111) # prepare measurements # measures = fit.dataset.sig.values # calculate estimates # estimates = fit.calc_model().tot.values measures, estimates = pd.concat( [fit.dataset.sig, fit.calc_model().tot], axis=1 ).values.T if mima is None: mi = np.min((measures, estimates)) ma = np.max((measures, estimates)) else: mi, ma = mima ax.scatter(estimates, measures, s=pointsize, alpha=0.7, **kwargs) # plot 45degree-line ax.plot([mi, ma], [mi, ma], "k--") if fit.sig0 is True: quantity = r"$\sigma_0$" else: quantity = "Intensity" if fit.dB is True: scale = "[dB]" else: scale = "" ax.set_xlabel("modelled " + quantity + scale) ax.set_ylabel("measured " + quantity + scale) if regression is True: from scipy.stats import linregress # evaluate linear regression to get r-value etc. slope, intercept, r_value, p_value, std_err = linregress( estimates, measures ) ax.plot( np.sort(measures), intercept + slope * np.sort(measures), "r--", alpha=0.4, ) ax.text( 0.8, 0.1, "$R^2$ = " + str(np.round(r_value ** 2, 2)), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) return fig def fit_timeseries( self, fit=None, dB=True, sig0=True, params=None, printtot=True, printsurf=True, printvol=True, printint=True, printorig=True, months=None, years=None, ylim=None, printinc=True, ): """ Print individual contributions, resulting parameters and the reference dataset of an rt1.rtfits object as timeseries. 
Parameters ------------- fit : rtfits object the rtfits-object containing the fit-results dB : bool (default = True) indicator if the plot is intended to be in dB or linear units sig0 : bool (default = True) indicator if sigma_0 (sig0) or intensity (I) is displayed The applied relation is: sig0 = 4.*pi*cos(theta) * I params: list a list of parameter-names that should be overprinted (the names must coincide with the arguments of set_V_SRF()) printtot, printsurf, printvol, printint, printorig : bool indicators if the corresponding components should be plotted months : list of int (default = None) a list of months to plot (if None, all will be plotted) years : list of int (default = None) a list of years to select (if None, all will be plotted) ylim : tuple a tuple of (ymin, ymax) that will be used as boundaries for the y-axis printinc : bool (default = True) indicator if the incidence-angle dependency should be plotted (in a separate plot alongside the timeseries) Returns -------------- f : matplotlib.figure the used matplotlib figure instance """ if fit is None: fit = self.fit # get incidence-angles inc_array = np.ma.masked_array(fit.inc, fit.mask) inc = inc_array.compressed() # get input dataset data = np.ma.masked_array(fit.data, fit.mask) # calculate individual contributions contrib = fit.calc_model(return_components=True) contrib["$\\sigma_0$ dataset"] = fit.dataset.sig contrib["inc"] = fit.dataset.inc # convert units complist = [i for i in contrib.keys() if i not in ["inc"]] contrib[complist] = contrib[complist].apply( dBsig0convert, inc=inc, dB=dB, sig0=sig0, fitdB=fit.dB, fitsig0=fit.sig0, ) # drop unneeded columns if printint is False and "inter" in contrib: contrib = contrib.drop("inter", axis=1) if printtot is False: contrib = contrib.drop("tot", axis=1) if printsurf is False: contrib = contrib.drop("surf", axis=1) if printvol is False: contrib = contrib.drop("vol", axis=1) if printorig is False: contrib = contrib.drop("$\\sigma_0$ dataset", axis=1) # select years and months if years is not None: contrib = contrib.loc[contrib.index.year.isin(years)] if months is not None: contrib = contrib.loc[contrib.index.month.isin(months)] # print incidence-angle dependency if printinc is True: f, [ax, ax_inc] = plt.subplots( ncols=2, figsize=(15, 5), gridspec_kw={"width_ratios": [3, 1]}, sharey=True, ) f.subplots_adjust(left=0.05, right=0.98, top=0.98, bottom=0.1, wspace=0.1) # ------------------- color = { "tot": "r", "surf": "b", "vol": "g", "inter": "y", "$\\sigma_0$ dataset": "k", } groupedcontrib = contrib.groupby(contrib.index) # return contrib, groupedcontrib for label in contrib.keys(): if label in ["inc"]: continue a = np.rad2deg( rectangularize( [x.values for _, x in groupedcontrib["inc"]], return_masked=True, ) ).T b = np.array( rectangularize( [x.values for _, x in groupedcontrib[label]], return_masked=True, ) ).T x = np.array([a, b]).T l_col = mpl.collections.LineCollection( x, linewidth=0.25, label="x", color=color[label], alpha=0.5 ) ax_inc.add_collection(l_col) ax_inc.scatter(a, b, color=color[label], s=1) ax_inc.set_xlim(a.min(), a.max()) ax_inc.set_xlabel("$\\theta_0$") ax_inc.set_xlabel("$\\theta_0$") else: f, ax = plt.subplots(figsize=(12, 5)) f.subplots_adjust(left=0.05, right=0.98, top=0.98, bottom=0.1, wspace=0.05) for label, val in contrib.items(): if label in ["inc"]: continue color = {"tot": "r", "surf": "b", "vol": "g", "inter": "y"} if printorig is True: color["$\\sigma_0$ dataset"] = "k" ax.plot( val.sort_index(), linewidth=0.25, marker=".", ms=2, label=label, 
color=color[label], alpha=0.5, ) # overprint parameters if params is not None: paramdf = fit.res_df if years is not None: paramdf = paramdf.loc[paramdf.index.year.isin(years)] if months is not None: paramdf = paramdf.loc[paramdf.index.month.isin(months)] pax = ax.twinx() for k in params: pax.plot(paramdf[k], lw=1, marker=".", ms=2, label=k) pax.legend(loc="upper right", ncol=5) pax.set_ylabel("parameter-values") # format datetime index ax.xaxis.set_minor_locator(mpl.dates.MonthLocator()) ax.xaxis.set_minor_formatter(mpl.dates.DateFormatter("%m")) ax.xaxis.set_major_locator(mpl.dates.YearLocator()) ax.xaxis.set_major_formatter(mpl.dates.DateFormatter("\n%Y")) # set ylabels if sig0 is True: label = "$\\sigma_0$" else: label = "Intensity" if dB is True: label += " [dB]" ax.set_ylabel(label) # generate legend hand, lab = ax.get_legend_handles_labels() lab, unique_ind = np.unique(lab, return_index=True) ax.legend( handles=list(np.array(hand)[unique_ind]), labels=list(lab), loc="upper left", ncol=5, ) if ylim is not None: ax.set_ylim(ylim) return f def fit_errors(self, fit=None, relative=False, result_selection="all"): """ a function to quickly print residuals for each measurement and for each incidence-angle value Parameters ------------ fit : list output of performfit()-function relative : bool (default = False) indicator if relative (True) or absolute (False) residuals shall be plotted Returns -------------- fig : matplotlib.figure the used matplotlib figure instance """ if fit is None: fit = self.fit if result_selection == "all": result_selection = range(len(fit.data)) # Calculate the residuals (based on R, inc and res_dict) fit.R.t_0 = fit.inc fit.R.p_0 = np.zeros_like(fit.inc) estimates = fit._calc_model() # calculate the residuals based on masked arrays masked_estimates = np.ma.masked_array(estimates, mask=fit.mask) masked_data = np.ma.masked_array(fit.data, mask=fit.mask) res = np.ma.sqrt((masked_estimates - masked_data) ** 2) if relative is True: res = res / masked_estimates # apply mask to data and incidence-angles (and convert to degree) inc = np.ma.masked_array(np.rad2deg(fit.inc), mask=fit.mask) # make new figure fig = plt.figure(figsize=(14, 10)) ax = fig.add_subplot(212) if relative is True: ax.set_title("Mean relative residual per measurement") else: ax.set_title("Mean absolute residual per measurement") ax2 = fig.add_subplot(211) if relative is True: ax2.set_title("Relative residuals per incidence-angle") else: ax2.set_title("Residuals per incidence-angle") # the use of masked arrays might cause python 2 compatibility issues! ax.plot(fit.index[result_selection], res[result_selection], ".", alpha=0.5) # plot mean residual for each measurement ax.plot( fit.index[result_selection], np.ma.mean(res[result_selection], axis=1), "k", linewidth=3, marker="o", fillstyle="none", ) # plot total mean of mean residuals per measurement ax.plot( fit.index[result_selection], [np.ma.mean(np.ma.mean(res[result_selection], axis=1))] * len(result_selection), "k--", ) # add some legends res_h = mlines.Line2D( [], [], color="black", label="Mean res. 
per measurement", linestyle="-", linewidth=3, marker="o", fillstyle="none", ) res_h_dash = mlines.Line2D( [], [], color="black", linestyle="--", label="Average mean res.", linewidth=1, fillstyle="none", ) res_h_dots = mlines.Line2D( [], [], color="black", label="Residuals", linestyle="-", linewidth=0, marker=".", alpha=0.5, ) handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles + [res_h_dots] + [res_h] + [res_h_dash], loc=1) ax.set_ylabel("Residual") # # evaluate mean residuals per incidence-angle meanincs = np.ma.unique(np.concatenate(inc[result_selection])) mean = np.full_like(meanincs, 0.0) for a, incval in enumerate(meanincs): select = np.where(inc[result_selection] == incval) res_selected = res[result_selection][ select[0][:, np.newaxis], select[1][:, np.newaxis] ] mean[a] = np.ma.mean(res_selected) sortpattern = np.argsort(meanincs) meanincs = meanincs[sortpattern] mean = mean[sortpattern] # plot residuals per incidence-angle for each measurement for i, resval in enumerate(res[result_selection]): sortpattern = np.argsort(inc[result_selection[i]]) ax2.plot( inc[result_selection[i]][sortpattern], resval[sortpattern], ":", alpha=0.5, marker=".", ) # plot mean residual per incidence-angle ax2.plot(meanincs, mean, "k", linewidth=3, marker="o", fillstyle="none") # add some legends res_h2 = mlines.Line2D( [], [], color="black", label="Mean res. per inc-angle", linestyle="-", linewidth=3, marker="o", fillstyle="none", ) res_h_lines = mlines.Line2D( [], [], color="black", label="Residuals", linestyle=":", alpha=0.5 ) handles2, labels2 = ax2.get_legend_handles_labels() ax2.legend(handles=handles2 + [res_h_lines] + [res_h2], loc=1) ax2.set_xlabel("$\\theta_0$ [deg]") ax2.set_ylabel("Residual") # find minimum and maximum incidence angle maxinc = np.max(inc) mininc = np.min(inc) ax2.set_xlim(np.floor(mininc) - 1, np.ceil(maxinc) + 1) # set major and minor ticks ax2.xaxis.set_major_locator(plt.MultipleLocator(1)) ax2.xaxis.set_major_formatter(plt.FormatStrFormatter("%d")) ax2.xaxis.set_minor_locator(plt.MultipleLocator(0.25)) # set ticks if isinstance(fit.index[0], datetime.datetime): plt.setp(ax.get_xticklabels(), rotation=30, ha="right") fig.tight_layout() return fig def results( self, fit=None, startvals=False, legend=False, result_selection="all", legend_fmt="%d.%m.%Y %H:%M:%S", ): """ a function to quickly print the fit-results and the gained parameters Parameters ------------ fit : list output of performfit()-function truevals : dict (default = None) dictionary of the expected parameter-values (must be of the same shape as the parameter-values gained from the fit). 
if provided, the difference between the expected- and fitted values is plotted startvals : bool (default = False) if True, the model-results using the start-values are plotted as black lines legend : bool (default = True) indicator if legends should be plotted legend_fmt : str (default = '%d.%m.%Y %H:%M:%S') the datetime-format to use when printing a legend result_selection : list-like or 'all' a list of the measurement-numbers that should be plotted (indexed starting from 0) or 'all' in case all measurements should be plotted Returns -------------- fig : matplotlib.figure the used matplotlib figure instance """ if fit is None: fit = self.fit if result_selection == "all": result_selection = range(len(fit.data)) # generate figure fig = plt.figure(figsize=(14, 10)) ax = fig.add_subplot(211) ax.set_title("Fit-results") # calculate results fitplot = fit._calc_model() # get masked data masked_data = np.ma.masked_array(fit.data, fit.mask) # get labels try: labels = pd.to_datetime(fit.fit_index).strftime(legend_fmt)[ result_selection ] except Exception: labels = result_selection for i in result_selection: (l,) = ax.plot(fit.inc[i], fitplot[i], alpha=0.4, label=labels[i]) ax.plot(fit.inc[i], masked_data[i], ".", c=l.get_color()) if legend is True: if len(result_selection) > 20: print("more than 20 lines are plotted... legend is disabled") else: ax.legend(loc=1) # ----------- plot start-values ------------ if startvals is True: startplot = fit._calc_model(res_dict=fit.start_dict) for i, val in enumerate(startplot[result_selection]): if i == 0: label = "fitstart" else: label = "" ax.plot( fit.inc[result_selection[i]], val, "k--", linewidth=1, alpha=0.5, label=label, ) if fit.sig0 is False: label = "$I^{tot}$" elif fit.sig0 is True: label = r"$\sigma_0^{tot}$" if fit.dB is True: label += " [dB]" ax.set_ylabel(label) ax.set_xlabel("$\\theta_0$") ax2 = fig.add_subplot(212) ax2.set_title("Estimated parameters") ax2.set_ylabel("Parameters") # plot fitted values # assign colors colordict = {key: f"C{i%10}" for i, key in enumerate(fit.res_dict.keys())} for key, val in fit.res_df.items(): ax2.plot(val, alpha=1.0, label=key, color=colordict[key]) if len(result_selection) < len(fit.data): for i, resid in enumerate(result_selection): ax2.text( fit.index[resid], ax2.get_ylim()[1] * 0.9, resid, bbox=dict(facecolor=f"C{i}", alpha=0.5), ) ax2.legend(loc=1) fig.tight_layout() return fig def single_results( self, fit=None, fit_numbers=None, fit_indexes=None, hexbinQ=True, hexbinargs={}, convertTodB=False, datetime_unit="h", ): """ a function to investigate the quality of the individual fits Parameters ------------ fit : rt1.rtfits.Fits object the fit-object to use fit_numbers : list a list containing the position of the measurements that should be plotted (starting from 0) fit_indexes : list a list containing the index-values of the measurements that should be plotted Other Parameters ------------------ hexbinQ : bool (default = False) indicator if a hexbin-plot should be underlayed that shows the distribution of the datapoints hexbinargs : dict a dict containing arguments to customize the hexbin-plot convertTodB : bool (default=False) if set to true, the datasets will be converted to dB Returns -------------- fig : matplotlib.figure the used matplotlib figure instance """ if fit is None: fit = self.fit if fit_numbers is not None and fit_indexes is not None: assert False, "please provide EITHER fit_numbers OR fit_indexes!" 
elif fit_indexes is not None: # fit_numbers = np.where(fit.index.isin(fit_indexes))[0] fit_numbers = np.argmin( np.abs( fit.fit_index - np.expand_dims( np.atleast_1d(np.array(fit_indexes, dtype=fit.index.dtype)), -1, ) ), axis=1, ) elif fit_numbers is None and fit_indexes is None: fit_numbers = [1] # function to generate colormap that fades between colors def CustomCmap(from_rgb, to_rgb): # from color r,g,b r1, g1, b1 = from_rgb # to color r,g,b r2, g2, b2 = to_rgb cdict = { "red": ((0, r1, r1), (1, r2, r2)), "green": ((0, g1, g1), (1, g2, g2)), "blue": ((0, b1, b1), (1, b2, b2)), } cmap = LinearSegmentedColormap("custom_cmap", cdict) return cmap estimates = fit._calc_model() indexsplits = fit._orig_index fig = plt.figure() ax = fig.add_subplot(111) for m_i, m in enumerate(fit_numbers): if convertTodB is True: y = 10.0 * np.log10(estimates[m][~fit.mask[m]]) else: y = estimates[m][~fit.mask[m]] # plot data nindexes = len(indexsplits[m]) if nindexes > 1: mindate = np.datetime_as_string(indexsplits[m][0], unit=datetime_unit) maxdate = np.datetime_as_string(indexsplits[m][-1], unit=datetime_unit) if mindate == maxdate: label = f"{mindate} [{nindexes}]" else: label = f"({mindate} - {maxdate}) [{nindexes}]" else: label = np.datetime_as_string(indexsplits[m][0], unit=datetime_unit) xdata = np.rad2deg(fit.inc[m][~fit.mask[m]]) if convertTodB is True: ydata = 10.0 * np.log10(fit.data[m][~fit.mask[m]]) else: ydata = fit.data[m][~fit.mask[m]] # get color that will be applied to the next line drawn (dummy,) = ax.plot(xdata[0], ydata[0], ".", alpha=0.0) color = dummy.get_color() if hexbinQ is True: args = dict(gridsize=15, mincnt=1, linewidths=0.0, alpha=0.7) args.update(hexbinargs) # evaluate the hexbinplot once to get the maximum number of # datapoints within a single hexagonal (used for normalization) dummyargs = args.copy() dummyargs.update({"alpha": 0.0}) hb = ax.hexbin(xdata, ydata, **dummyargs) # generate colormap that fades from white to the color # of the plotted data (asdf.get_color()) cmap = CustomCmap([1.00, 1.00, 1.00], plt.cm.colors.hex2color(color)) # setup correct normalizing instance norm = Normalize(vmin=0, vmax=hb.get_array().max()) ax.hexbin(xdata, ydata, cmap=cmap, norm=norm, **args) # plot datapoints (asdf,) = ax.plot( xdata, ydata, ".", color=color, alpha=1.0, label=label, markersize=10, ) # plot results iii = fit.inc[m][~fit.mask[m]] ax.plot( np.rad2deg(iii[np.argsort(iii)]), y[np.argsort(iii)], "-", color="w", linewidth=3, ) ax.plot( np.rad2deg(iii[np.argsort(iii)]), y[np.argsort(iii)], "-", color=asdf.get_color(), linewidth=2, marker="x", ) ax.set_xlabel("$\\theta_0$ [deg]") ax.set_ylabel("$\\sigma_0$ [dB]") ax.legend(title="# Measurement") return fig def intermediate_results(self, fit=None, params=None, cmaps=None): """ a function to plot the intermediate-results (the data is only available if rtfits.performfit has been called with the argument intermediate_results=True!) Parameters ----------- fit : rtfits object the rtfits-object containing the fit-results params : list a list of parameter-names that are intended to be plotted as timeseries. cmaps : list a list of the colormaps used to plot the parameter variations Returns ------- f : matplotlib.figure the used matplotlib figure instance """ if fit is None: fit = self.fit try: fit.intermediate_results except AttributeError: assert False, ( "No intermediate results are found, you must run" + " performfit() with intermediate_results=True!" 
) if params is None: params = fit.res_dict.keys() # constant parameters constparams = [key for key in params if len(fit.res_dict[key]) == 1] # timeseries-parameters tsparams = [i for i in params if i not in constparams] if cmaps is None: cmaps = [ "Reds", "Greens", "Blues", "Purples", "Oranges", "Greys", "YlOrBr", "YlOrRd", "OrRd", "PuRd", "RdPu", "BuPu", "GnBu", "PuBu", "YlGnBu", "PuBuGn", "BuGn", "YlGn", ] interparams = {} for i, valdict in enumerate(fit.intermediate_results["parameters"]): for key, val in valdict.items(): if key in interparams: interparams[key] += [[i, np.mean(val[0])]] else: interparams[key] = [[i, np.mean(val[0])]] intererrs = {} for i, valdict in enumerate(fit.intermediate_results["residuals"]): for key, val in valdict.items(): if key in intererrs: intererrs[key] += [[i, np.mean(val)]] else: intererrs[key] = [[i, np.mean(val)]] interjacs = {} for i, valdict in enumerate(fit.intermediate_results["jacobian"]): for key, val in valdict.items(): if key in interjacs: interjacs[key] += [[i, np.mean(val)]] else: interjacs[key] = [[i, np.mean(val)]] for i in [interparams, intererrs, interjacs]: for key, val in i.items(): i[key] = np.array(val).T interres_params = {} for key in params: interres_p = pd.concat( [ pd.DataFrame(valdict[key], fit.meandatetimes[key], columns=[i]) for i, valdict in enumerate(fit.intermediate_results["parameters"]) ], axis=1, ) interres_params[key] = interres_p f = plt.figure(figsize=(15, 10)) f.subplots_adjust(top=0.98, left=0.05, right=0.95) gs = GridSpec( 4, len(interjacs), # width_ratios=[1, 2], # height_ratios=[1, 2, 1] ) axsm = plt.subplot(gs[0, :]) axerr = plt.subplot(gs[1, :]) paramaxes, jacaxes = [], [] for i in range(len(interjacs)): paramaxes += [plt.subplot(gs[2, i])] jacaxes += [plt.subplot(gs[3, i])] smhandles, smlabels = [], [] nparam = 0 for [parameter, paramdf] in interres_params.items(): # plot only temporally varying parameters as timeseries if parameter not in tsparams: continue cmap = plt.get_cmap(cmaps[nparam]) for key, val in paramdf.items(): axsm.plot( val, c=cmap((float(key) / len(paramdf.keys()))), lw=0.25, marker=".", ms=2, ) # add colorbar axcb = f.add_axes( [ axsm.get_position().x1 - 0.01 * (nparam + 1), axsm.get_position().y0, 0.01, axsm.get_position().y1 - axsm.get_position().y0, ] ) cbbounds = [1] + list(np.arange(2, len(paramdf.keys()) + 1, 1)) cb = mpl.colorbar.ColorbarBase( axcb, cmap=cmap, orientation="vertical", boundaries=[0] + cbbounds + [cbbounds[-1] + 1], spacing="proportional", norm=mpl.colors.BoundaryNorm(cbbounds, cmap.N), ) axcb.text( 0.51, 0.5, parameter, rotation=90, fontsize=8, horizontalalignment="center", verticalalignment="center", ) if nparam > 0: cb.set_ticks([]) smhandles += [mpl.lines.Line2D([], [], color=cmap(0.9))] smlabels += [parameter] nparam += 1 axsm.legend(handles=smhandles, labels=smlabels, loc="upper left") axsmbounds = list(axsm.get_position().bounds) axsmbounds[2] = axsmbounds[2] - 0.015 * nparam axsm.set_position(axsmbounds) for [pax, jax, [key, val]] in zip(paramaxes, jacaxes, interparams.items()): if key not in interjacs: continue if key in tsparams: label = f"{key} (mean)" else: label = key pax.plot(*val, label=label, marker=".", ms=3, lw=0.5) pax.legend(loc="upper center") jax.plot( interjacs[key][0], interjacs[key][1], label=label, marker=".", ms=3, lw=0.5, ) jax.legend(loc="upper center") paramaxes[-1].text( 1.03, 0.5, "Parameter estimates", rotation=90, fontweight="bold", horizontalalignment="left", verticalalignment="center", transform=paramaxes[-1].transAxes, ) 
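        # annotate the jacobian-column (analogous to the "Parameter estimates"
        # label added above) and plot the evolution of the absolute / relative
        # residuals over the fit-iterations below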
jacaxes[-1].text( 1.03, 0.5, "Jacobi determinant", rotation=90, fontweight="bold", horizontalalignment="left", verticalalignment="center", transform=jacaxes[-1].transAxes, ) for key, val in intererrs.items(): if key == "abserr": axerr.semilogy( val[0], np.abs(val[1]), label="absolute error", marker=".", ms=3, lw=0.5, c="r", ) axerr.legend(ncol=5, loc="upper left") if key == "relerr": axrelerr = axerr.twinx() axrelerr.semilogy( val[0], np.abs(val[1]), label="relative error", marker=".", ms=3, lw=0.5, c="g", ) axrelerr.legend(ncol=5, loc="upper right") return f def printsig0analysis( self, fit=None, range1=1, range2=None, use_index="groups", printfullt_0=True, printfulldata=True, dB=True, sig0=True, printcomponents1=True, printcomponents2=True, printparamnames=None, printparamstyles=dict(), ): """ A widget to analyze the results of a rt1.rtfits.Fits object. (In order to keep the widget responsive, a reference to the returns must be kept!) Parameters ---------- fit : rt1.rtfits.Fits object The used fit-object. range1, range2 : int, optional The number of consecutive measurements considered by the first/second slider. The default is (1 / None). use_index : str, optional Select the partition of the index accessible via the sliders. - if 'groups', the dataset will be grouped with respect to the parameter-dynamics - if 'dataset', each unique dataset-index is used printcomponents1, printcomponents2 : bool, optional Indicator if individual backscatter contributions (surface, volume, interaction) should be plotted or not. The default is (True, True). printfullt_0 : bool, optional Indicator if backscatter-contributions should be evaluated over the full incidence-angle range or just over the range coverd by the dataset. The default is True. printfulldata : bool, optional Indicator if the range of the whole dataset should be indicated by small dots or not. The default is True. dB : bool, optional Indicator if the backscatter-plot should be in linear-units or dB. The default is True. sig0 : bool, optional Indicator if the backscatter-plot should represent sigma0 or intensity. The default is True. printparamnames : list of str, optional A list of strings corresponding to the parameter-names whose results should be added to the plot. The default is None. printparamstyles : dict A dict with style-options (updating the default ones) passed to the plot for each parameter selected in printparamnames via: >>> plt.plot(x, y, **printparamstyles['parameter-name']) Returns ------- list a reference to the matplotlib-objects corresponding to: [figure, first_slider, (second_slider)]. 
""" if fit is None: fit = self.fit if printparamnames is None: printparamnames = fit.res_dict.keys() # gridspec for incidence-angle and parameter plots gs = GridSpec(3, 2, height_ratios=[0.65, 0.25, 0.1]) gs.update(top=0.98, left=0.1, right=0.9, bottom=0.025) # sub-gridspec for sliders gs2 = GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[2, :]) f = plt.figure(figsize=(10, 6)) # add plot-axes ax = f.add_subplot(gs[0, 0]) ax1 = f.add_subplot(gs[0, 1]) ax2 = f.add_subplot(gs[1, :]) # add slider axes slider_ax = f.add_subplot(gs2[0]) if range2 is not None: slider_bx = f.add_subplot(gs2[1]) ax.grid() ax1.grid() if use_index == "dataset": sig0_vals = fit.calc_model(return_components=True) sig0_vals["data"] = fit.dataset.sig sig0_vals["incs"] = fit.dataset.inc sig0_vals = sig0_vals.groupby(level=0).agg(list).to_dict(orient="list") sig0_vals = { key: rectangularize(val, return_masked=True) for key, val in sig0_vals.items() } sig0_vals["indexes"] = pd.to_datetime(fit.index) elif use_index == "groups": # calculate backscatter values and ensure correct index-order # ( in case a unordered dyn-dict is used) mask = fit.mask sig0_vals = fit._calc_model(return_components=True) # apply mask and convert to pandas dataframe sig0_vals = [np.ma.masked_array(con, mask) for con in sig0_vals] sig0_vals = dict(zip(["tot", "surf", "vol", "inter"], sig0_vals)) sig0_vals["data"] = np.ma.masked_array(fit.data, mask) sig0_vals["incs"] = np.ma.masked_array(fit.inc, mask) sig0_vals["indexes"] = pd.to_datetime(fit.meandatetimes_group) # convert to sig0 and dB if necessary sig0_vals_I_linear = dict() for key in ["tot", "surf", "vol", "inter", "data"]: if key not in sig0_vals: continue sig0_vals[key] = dBsig0convert( sig0_vals[key], sig0_vals["incs"], dB=dB, sig0=sig0, fitdB=fit.dB, fitsig0=fit.sig0, ) sig0_vals_I_linear[key] = dBsig0convert( sig0_vals[key], sig0_vals["incs"], dB=False, sig0=False, fitdB=dB, fitsig0=sig0, ) if printfullt_0 is True: inc = np.deg2rad(np.arange(1, 89, 1)) if use_index == "dataset": newsig0_vals = fit.calc( param=fit.res_df, inc=inc, fixed_param=fit.dataset[fit.fixed_dict.keys()], return_components=True, ) elif use_index == "groups": if len(fit.fixed_dict) > 0: # get the average value of the fixed-parameters # for each group usefixedparams = ( fit.dataset[fit.fixed_dict.keys()] .groupby(fit._groupindex, sort=False) .mean() .set_index(pd.to_datetime(fit.meandatetimes_group)) ) else: usefixedparams = dict() newsig0_vals = fit.calc( param=fit.res_df_group, inc=inc, fixed_param=usefixedparams, return_components=True, ) newsig0_vals = dict(zip(["tot", "surf", "vol", "inter"], newsig0_vals)) newsig0_vals["incs"] = np.broadcast_to(inc, newsig0_vals["tot"].shape) newsig0_vals_I_linear = dict() for key in ["tot", "surf", "vol", "inter"]: if key not in newsig0_vals: continue newsig0_vals[key] = dBsig0convert( newsig0_vals[key], newsig0_vals["incs"], fitdB=fit.dB, fitsig0=fit.sig0, dB=dB, sig0=sig0, ) newsig0_vals_I_linear[key] = dBsig0convert( newsig0_vals[key], newsig0_vals["incs"], dB=False, sig0=False, fitdB=dB, fitsig0=sig0, ) ax.set_xlim( [ -2 + np.rad2deg(np.ma.min(newsig0_vals["incs"])), 2 + np.rad2deg(np.ma.max(newsig0_vals["incs"])), ] ) ax.set_ylim( [ np.min( [ np.ma.min(newsig0_vals["tot"]), np.ma.min(sig0_vals["data"]), ] ), np.max( [ np.ma.max(newsig0_vals["tot"]), np.ma.max(sig0_vals["data"]), ] ), ] ) else: ax.set_xlim( [ -2 + np.rad2deg(np.ma.min(sig0_vals["incs"][0])), 2 + np.rad2deg(np.ma.max(sig0_vals["incs"][0])), ] ) ax.set_ylim( [ np.min( [ np.ma.min(sig0_vals["tot"]), 
np.ma.min(sig0_vals["data"]), ] ), np.max( [ np.ma.max(sig0_vals["tot"]), np.ma.max(sig0_vals["data"]), ] ), ] ) # ensure sort-order if use_index == "dataset": inc_sortp = np.argsort(fit.index) elif use_index == "groups": inc_sortp = np.argsort(fit.meandatetimes_group) for key, val in sig0_vals.items(): sig0_vals[key] = val[inc_sortp] for key, val in sig0_vals_I_linear.items(): sig0_vals_I_linear[key] = val[inc_sortp] for key, val in newsig0_vals.items(): newsig0_vals[key] = val[inc_sortp] for key, val in newsig0_vals_I_linear.items(): newsig0_vals_I_linear[key] = val[inc_sortp] # print full data points in the background if printfulldata is True: ax.plot( np.rad2deg(sig0_vals["incs"]), sig0_vals["data"], lw=0.0, marker=".", ms=0.5, color="k", alpha=0.5, ) # plot parameters as specified in printparamnames axparamplot = ax2 handles, labels = [], [] i = 0 pos = 1 - len(printparamnames) // 2 * 0.035 for key in printparamnames: try: style = dict(color="C" + str(i), marker=".", lw=0.75) style.update(printparamstyles.get(key, dict())) if i > 0: axparamplot = ax2.twinx() axparamplot.tick_params( axis="y", which="both", labelsize=5, length=2 ) pos += 0.035 axparamplot.spines["right"].set_position(("axes", pos)) axparamplot.tick_params( axis="y", which="both", labelsize=5, length=2 ) if key in fit.res_df: (l,) = axparamplot.plot( fit.res_df.sort_index()[key], label=key, **style ) elif key in fit.dataset: (l,) = axparamplot.plot( fit.dataset.sort_index()[key], label=key, **style ) else: print( f'parameter "{key}" not found in "fit.res_df"', 'and "fit.dataset"', ) continue # add handles and labels to legend handles += axparamplot.get_legend_handles_labels()[0] labels += axparamplot.get_legend_handles_labels()[1] # change color of axis to fit color of lines axparamplot.yaxis.label.set_color(l.get_color()) axparamplot.tick_params(axis="y", colors=l.get_color()) # shift twin-axes if necessary i += 1 except Exception: pass axparamplot.legend( handles=handles, labels=labels, loc="upper center", ncol=len(printparamnames), ) ax2.xaxis.set_minor_locator(mpl.dates.MonthLocator()) ax2.xaxis.set_minor_formatter(mpl.dates.DateFormatter("%m")) ax2.xaxis.set_major_locator(mpl.dates.YearLocator()) ax2.xaxis.set_major_formatter(mpl.dates.DateFormatter("\n%Y")) indicator_bounds = [ax2.get_ylim()[0] * 1.05, ax2.get_ylim()[1] * 0.95] # ----------------------------------------------------------- # plot lines def plotlines( dayrange, printcomponents, printfullt_0, styledict_dict, styledict_fullt0_dict, ): incs = np.rad2deg(sig0_vals["incs"]) lines = [] for day in np.arange(0, dayrange, 1): lines += ax.plot( incs[day], sig0_vals["tot"][day], **styledict_dict["tot"] ) if printcomponents: lines += ax.plot( incs[day], sig0_vals["surf"][day], **styledict_dict["surf"], ) lines += ax.plot( incs[day], sig0_vals["vol"][day], **styledict_dict["vol"], ) if fit.int_Q is True: lines += ax.plot( incs[day], sig0_vals["inter"][day], **styledict_dict["inter"], ) lines += ax.plot( incs[day], sig0_vals["data"][day], **styledict_dict["data"] ) lines += ax2.plot( [sig0_vals["indexes"][day]] * 2, indicator_bounds, **styledict_dict["indicator"], ) lines_frac = [] for day in np.arange(0, dayrange, 1): lintot = sig0_vals_I_linear["tot"][day] linsurf = sig0_vals_I_linear["surf"][day] linvol = sig0_vals_I_linear["vol"][day] if fit.int_Q is True: lininter = sig0_vals_I_linear["inter"][day] lines_frac += ax1.plot( incs[day], linsurf / lintot, **styledict_dict["surf"] ) lines_frac += ax1.plot( incs[day], linvol / lintot, **styledict_dict["vol"] ) if 
fit.int_Q is True: lines_frac += ax1.plot( incs[day], lininter / lintot, **styledict_dict["inter"] ) # plot full-incidence-angle lines linesfull = [] lines_frac_full = [] if printfullt_0 is True: newincs = np.rad2deg(newsig0_vals["incs"]) for day in np.arange(0, dayrange, 1): sortp = np.argsort(newincs[day]) linesfull += ax.plot( newincs[day][sortp], newsig0_vals["tot"][day][sortp], **styledict_fullt0_dict["tot"], ) if printcomponents: linesfull += ax.plot( newincs[day][sortp], newsig0_vals["surf"][day][sortp], **styledict_fullt0_dict["surf"], ) linesfull += ax.plot( newincs[day][sortp], newsig0_vals["vol"][day][sortp], **styledict_fullt0_dict["vol"], ) if fit.int_Q is True: linesfull += ax.plot( newincs[day][sortp], newsig0_vals["inter"][day][sortp], **styledict_fullt0_dict["inter"], ) lintot = newsig0_vals_I_linear["tot"][day] linsurf = newsig0_vals_I_linear["surf"][day] linvol = newsig0_vals_I_linear["vol"][day] if fit.int_Q is True: lininter = newsig0_vals_I_linear["inter"][day] lines_frac_full += ax1.plot( newincs[day][sortp], (linsurf / lintot)[sortp], **styledict_fullt0_dict["surf"], ) lines_frac_full += ax1.plot( newincs[day][sortp], (linvol / lintot)[sortp], **styledict_fullt0_dict["vol"], ) if fit.int_Q is True: lines_frac_full += ax1.plot( newincs[day][sortp], (lininter / lintot)[sortp], **styledict_fullt0_dict["inter"], ) # add unique legend entries ha, la = ax.get_legend_handles_labels() unila, wherela = np.unique(la, return_index=True) # sort legend entries sort_order = dict( data_1=0, data_2=1, total=2, surface=3, volume=4, interaction=5 ) halas = [ [ha, la] for ha, la in zip(np.array(la)[wherela], np.array(ha)[wherela]) ] halas.sort(key=lambda val: sort_order[val[0]]) ax.legend(handles=[i[1] for i in halas], labels=[i[0] for i in halas]) return lines, lines_frac, linesfull, lines_frac_full # plot first set of lines styletot = { "lw": 1, "marker": "o", "ms": 3, "color": "k", "label": "total", } stylevol = { "lw": 1, "marker": "o", "ms": 3, "color": "g", "markerfacecolor": "gray", "label": "volume", } stylesurf = { "lw": 1, "marker": "o", "ms": 3, "color": "y", "markerfacecolor": "gray", "label": "surface", } styleinter = { "lw": 1, "marker": "o", "ms": 3, "color": "c", "markerfacecolor": "gray", "label": "interaction", } styledata = { "lw": 0, "marker": "s", "ms": 5, "color": "k", "markerfacecolor": "gray", "label": "data_1", } styleindicator = {"c": "k"} stylefullt0tot = {"lw": 0.25, "color": "k"} stylefullt0vol = {"lw": 0.25, "color": "g"} stylefullt0surf = {"lw": 0.25, "color": "y"} stylefullt0inter = {"lw": 0.25, "color": "c"} styledict_dict = dict( zip( ["tot", "surf", "vol", "inter", "data", "indicator"], [ styletot, stylesurf, stylevol, styleinter, styledata, styleindicator, ], ) ) styledict_fullt0_dict = dict( zip( ["tot", "surf", "vol", "inter"], [ stylefullt0tot, stylefullt0surf, stylefullt0vol, stylefullt0inter, ], ) ) lines, lines_frac, linesfull, lines_frac_full = plotlines( range1, printcomponents1, printfullt_0, styledict_dict, styledict_fullt0_dict, ) if range2 is not None: # plot second set of lines styletot = { "lw": 1, "marker": "o", "ms": 3, "color": "r", "dashes": [5, 5], "markerfacecolor": "none", } stylevol = { "lw": 1, "marker": "o", "ms": 3, "color": "g", "dashes": [5, 5], "markerfacecolor": "r", } stylesurf = { "lw": 1, "marker": "o", "ms": 3, "color": "y", "dashes": [5, 5], "markerfacecolor": "r", } styleinter = { "lw": 1, "marker": "o", "ms": 3, "color": "c", "dashes": [5, 5], "markerfacecolor": "r", } styledata = { "lw": 0, "marker": "s", "ms": 
5, "color": "r", "markerfacecolor": "none", "label": "data_2", } styleindicator = {"c": "gray", "ls": "--"} stylefullt0tot = {"lw": 0.25, "color": "r", "dashes": [5, 5]} stylefullt0vol = {"lw": 0.25, "color": "g", "dashes": [5, 5]} stylefullt0surf = {"lw": 0.25, "color": "y", "dashes": [5, 5]} stylefullt0inter = {"lw": 0.25, "color": "c", "dashes": [5, 5]} styledict_dict = dict( zip( ["tot", "surf", "vol", "inter", "data", "indicator"], [ styletot, stylesurf, stylevol, styleinter, styledata, styleindicator, ], ) ) styledict_fullt0_dict = dict( zip( ["tot", "surf", "vol", "inter"], [ stylefullt0tot, stylefullt0surf, stylefullt0vol, stylefullt0inter, ], ) ) lines2, lines_frac2, linesfull2, lines_frac_full2 = plotlines( range2, printcomponents2, printfullt_0, styledict_dict, styledict_fullt0_dict, ) # define function to update lines based on slider-input def animate( day0, lines, linesfull, lines_frac, lines_frac_full, dayrange, printcomponents, label, ): day0 = int(day0) label.set_position([day0, label.get_position()[1]]) if dayrange == 1: label.set_text(sig0_vals["indexes"][day0].strftime("%d. %b %Y %H:%M")) elif dayrange > 1: lday_0 = sig0_vals["indexes"][day0].strftime("%d. %b %Y %H:%M") lday_1 = sig0_vals["indexes"][day0 + dayrange - 1].strftime( "%d. %b %Y %H:%M" ) label.set_text(f"{lday_0} - {lday_1}") maxdays = len(sig0_vals["incs"]) i = 0 for day in np.arange(day0, day0 + dayrange, 1): if day >= maxdays: continue lines[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines[i].set_ydata(sig0_vals["tot"][day]) i += 1 if printcomponents: lines[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines[i].set_ydata(sig0_vals["surf"][day]) i += 1 lines[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines[i].set_ydata(sig0_vals["vol"][day]) if fit.int_Q is True: i += 1 lines[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines[i].set_ydata(sig0_vals["inter"][day]) i += 1 # update data measurements lines[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines[i].set_ydata(sig0_vals["data"][day]) i += 1 # update day-indicator line lines[i].set_xdata([sig0_vals["indexes"][day]] * 2) i += 1 i = 0 for day in np.arange(day0, day0 + dayrange, 1): if day >= maxdays: continue lintot = sig0_vals_I_linear["tot"][day] linsurf = sig0_vals_I_linear["surf"][day] linvol = sig0_vals_I_linear["vol"][day] if fit.int_Q is True: lininter = sig0_vals_I_linear["inter"][day] lines_frac[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines_frac[i].set_ydata(linsurf / lintot) i += 1 lines_frac[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines_frac[i].set_ydata(linvol / lintot) if fit.int_Q is True: i += 1 lines_frac[i].set_xdata(np.rad2deg(sig0_vals["incs"][day])) lines_frac[i].set_ydata(lininter / lintot) i += 1 if printfullt_0 is True: i = 0 for day in np.arange(day0, day0 + dayrange, 1): if day >= maxdays: continue day_inc_new = np.rad2deg(newsig0_vals["incs"][day]) sortp = np.argsort(day_inc_new) linesfull[i].set_xdata(day_inc_new[sortp]) linesfull[i].set_ydata(newsig0_vals["tot"][day][sortp]) i += 1 if printcomponents: linesfull[i].set_xdata(day_inc_new[sortp]) linesfull[i].set_ydata(newsig0_vals["surf"][day][sortp]) i += 1 linesfull[i].set_xdata(day_inc_new[sortp]) linesfull[i].set_ydata(newsig0_vals["vol"][day][sortp]) if fit.int_Q is True: i += 1 linesfull[i].set_xdata(day_inc_new[sortp]) linesfull[i].set_ydata(newsig0_vals["inter"][day][sortp]) i += 1 i = 0 for day in np.arange(day0, day0 + dayrange, 1): if day >= maxdays: continue day_inc_new = np.rad2deg(newsig0_vals["incs"][day]) sortp = 
np.argsort(day_inc_new) lintot = newsig0_vals_I_linear["tot"][day] linsurf = newsig0_vals_I_linear["surf"][day] linvol = newsig0_vals_I_linear["vol"][day] if fit.int_Q is True: lininter = newsig0_vals_I_linear["inter"][day] lines_frac_full[i].set_xdata(day_inc_new[sortp]) lines_frac_full[i].set_ydata((linsurf / lintot)[sortp]) i += 1 lines_frac_full[i].set_xdata(day_inc_new[sortp]) lines_frac_full[i].set_ydata((linvol / lintot)[sortp]) if fit.int_Q is True: i += 1 lines_frac_full[i].set_xdata(day_inc_new[sortp]) lines_frac_full[i].set_ydata((lininter / lintot)[sortp]) i += 1 return lines # define function to update slider-range based on zoom def updatesliderboundary(evt, slider): indexes = sig0_vals["indexes"] # Get the range for the new area xstart, ystart, xdelta, ydelta = ax2.viewLim.bounds xend = xstart + xdelta # convert to datetime-objects and ensure that they are in the # same time-zone as the sig0_vals indexes xend = mpl.dates.num2date(xend).replace(tzinfo=sig0_vals["indexes"].tzinfo) xstart = mpl.dates.num2date(xstart).replace( tzinfo=sig0_vals["indexes"].tzinfo ) zoomindex = np.where(np.logical_and(indexes > xstart, indexes < xend))[0] slider.valmin = zoomindex[0] - 1 slider.valmax = zoomindex[-1] + 1 slider.ax.set_xlim(slider.valmin, slider.valmax) # create the slider a_slider = Slider( slider_ax, # axes object for the slider "solid lines", # name of the slider parameter 0, # minimal value of parameter len(sig0_vals["tot"]) - 1, # maximal value of parameter valinit=0, # initial value of parameter valfmt="%i", # print slider-value as integer valstep=1, closedmax=True, ) a_slider.valtext.set_visible(False) slider_ax.set_xticks( np.arange( slider_ax.get_xlim()[0] - 1, slider_ax.get_xlim()[1] + 1, 1, dtype=int, ) ) slider_ax.tick_params(bottom=False, labelbottom=False) slider_ax.grid() label = slider_ax.text( 0, 0.5, f"{sig0_vals['indexes'][0].strftime('%d. %b %Y %H:%M')}", horizontalalignment="center", verticalalignment="center", fontsize=8, bbox=dict(facecolor="w", alpha=0.75, boxstyle="round,pad=.2"), ) # set slider to call animate function when changed a_slider.on_changed( partial( animate, lines=lines, linesfull=linesfull, lines_frac=lines_frac, lines_frac_full=lines_frac_full, dayrange=range1, printcomponents=printcomponents1, label=label, ) ) # update slider boundary with respect to zoom of second plot ax2.callbacks.connect( "xlim_changed", partial(updatesliderboundary, slider=a_slider) ) if range2 is not None: # here we create the slider b_slider = Slider( slider_bx, # axes object for the slider "dashed lines", # name of the slider parameter 0, # minimal value of parameter len(sig0_vals["tot"]) - 1, # maximal value of parameter valinit=0, # initial value of parameter valfmt="%i", valstep=1, closedmax=True, ) b_slider.valtext.set_visible(False) slider_bx.set_xticks( np.arange( slider_bx.get_xlim()[0] - 1, slider_bx.get_xlim()[1] + 1, 1, dtype=int, ) ) slider_bx.tick_params(bottom=False, labelbottom=False) slider_bx.grid() label2 = slider_bx.text( 0, 0.5, f"{sig0_vals['indexes'][0].strftime('%d. 
%b %Y %H:%M')}", horizontalalignment="center", verticalalignment="center", fontsize=8, bbox=dict(facecolor="w", alpha=0.75, boxstyle="round,pad=.2"), ) b_slider.on_changed( partial( animate, lines=lines2, linesfull=linesfull2, lines_frac=lines_frac2, lines_frac_full=lines_frac_full2, dayrange=range2, printcomponents=printcomponents2, label=label2, ) ) ax2.callbacks.connect( "xlim_changed", partial(updatesliderboundary, slider=b_slider) ) # a reference to the sliders must be returned in order to # remain interactive if range2 is not None: return f, a_slider, b_slider else: return f, a_slider def analyzemodel( self, fit=None, defdict=None, inc=None, labels=None, dB=True, sig0=True, int_Q=None, fillcomponents=True, ): """ Analyze the range of backscatter for a given model-configuration based on the defined parameter-ranges Parameters ---------- fit : rt1.rtfits.Fits, optional the fits-object to use The default is None. defdict : dict, optional A defdict used to define the rt1-configuration. (see rt1.rtfits.Fits for details). If none, the defdict provided in the fits-object will be used. The default is None. inc : array-like, optional The incidence-angles to be used in the calculation. If None, `np.deg2rad(np.linspace(1, 89, 100))` is used The default is None labels : dict, optional A dict with labels that will be used to replace the parameter-names. (e.g. {'parameter' : 'parameter_label', ....}) The default is {}. dB : bool, optional Indicator if the plot is in linear-units or dB. The default is True. sig0 : bool, optional Indicator if intensity- or sigma-0 values should be plotted. ( sig0 = 4 pi cos(theta) I ). The default is True. int_Q : bool, optional Indicator if the interaction-term should be evaluated. The default is False. fillcomponents : bool, optional Indicator if the variabilities of the components should be indicated by fillings. The default is True. 
Returns ------- f, slider, buttons the matplotlib figure, slider and button instances """ if fit is None: fit = self.fit res_dict = getattr(fit, "res_dict", None) if defdict is None: defdict = fit.defdict if inc is None: inc = np.deg2rad(np.linspace(1, 89, 100)) if labels is None: labels = dict() if int_Q is None: int_Q = fit.int_Q # get parameter ranges from defdict and fit minparams, maxparams, startparams, fixparams = {}, {}, {}, {} for key, val in defdict.items(): if val[0] is True: minparams[key] = val[3][0][0] maxparams[key] = val[3][1][0] # try to use fitted-values as start values for the parameters if res_dict is not None and key in res_dict: startparams[key] = np.mean(res_dict[key][0]) else: startparams[key] = val[1] if val[0] is False: if isinstance(val[1], (int, float)): # don't add constants to startparams (they are # directly inserted into the functions) # startparams[key] = val[1] fixparams[key] = val[1] elif val[1] == "auxiliary": assert key in fit.dataset or key in fit.fixed_dict, ( f"auxiliary dataset for {key} " + "not found in fit.dataset or " + "fit.fixed_dict" ) if key in fit.dataset: minparams[key] = fit.dataset[key].min() maxparams[key] = fit.dataset[key].max() startparams[key] = fit.dataset[key].mean() elif key in fit.fixed_dict: minparams[key] = fit.fixed_dict[key].min() maxparams[key] = fit.fixed_dict[key].max() startparams[key] = fit.fixed_dict[key].mean() if "bsf" not in defdict: startparams["bsf"] = fit.R.bsf fixparams["bsf"] = fit.R.bsf modelresult = dict( zip(["tot", "surf", "vol", "inter"], fit.calc(startparams, inc=inc)) ) # convert to sig0 and dB if required for key, val in modelresult.items(): modelresult[key] = dBsig0convert(val[0], inc, dB, sig0, fit.dB, fit.sig0) f = plt.figure(figsize=(12, 9)) f.subplots_adjust(top=0.93, right=0.98, left=0.07) # generate figure grid and populate with axes gs = GridSpec( 1 + len(minparams) // 2, 1 + 3, height_ratios=[8] + [1] * (len(minparams) // 2), width_ratios=[0.75, 1, 1, 1], ) gs.update(wspace=0.3) gsslider = GridSpec( 1 + len(minparams) // 2, 1 + 3, height_ratios=[8] + [1] * (len(minparams) // 2), width_ratios=[0.75, 1, 1, 1], ) gsslider.update(wspace=0.3, bottom=0.05) gsbutton = GridSpec( 1 + len(minparams) // 2, 1 + 3, height_ratios=[8] + [1] * (len(minparams) // 2), width_ratios=[0.75, 1, 1, 1], ) gsbutton.update(hspace=0.75, wspace=0.1, bottom=0.05) ax = f.add_subplot(gs[0, 0:]) paramaxes = {} col = 0 for i, key in enumerate(minparams): if i % 3 == 0: col += 1 paramaxes[key] = f.add_subplot(gsslider[col, 1 + i % 3]) buttonax = f.add_subplot(gsbutton[1:, 0]) # hide frame of button-axes buttonax.axis("off") # add values of fixed parameters if len(fixparams) > 0: ax.text( 0.01, 0.98, "fixed parameters:\n" + "".join( [f"{key}={round(val, 5)} " for key, val in fixparams.items()] ), ) # overplot data used in fit try: ax.plot( fit.dataset.inc, dBsig0convert( fit.dataset.sig, fit.dataset.inc, dB, sig0, fit.dB, fit.sig0, ), zorder=0, marker=".", alpha=0.5, lw=0, markerfacecolor="none", markeredgecolor="k", ) except Exception: pass # plot initial curves (ltot,) = ax.plot(inc, modelresult["tot"], "k", label="total contribution") (lsurf,) = ax.plot(inc, modelresult["surf"], "b", label="surface contribution") (lvol,) = ax.plot(inc, modelresult["vol"], "g", label="volume contribution") if int_Q is True: (lint,) = ax.plot( inc, modelresult["inter"], "y", label="interaction contribution", ) if dB is True: ax.set_ylim(-35, 5) ax.xaxis.set_major_locator(plt.MaxNLocator(10)) ax.xaxis.set_major_formatter( 
plt.FuncFormatter(lambda x, y: f"{np.rad2deg(x):.1f}") ) # a legend for the lines leg0 = ax.legend(ncol=4, bbox_to_anchor=(0.5, 1.1), loc="upper center") # add the line-legend as individual artist ax.add_artist(leg0) if dB is True and sig0 is True: ax.set_ylabel(r"$\sigma_0$ [dB]") if dB is True and sig0 is False: ax.set_ylabel(r"$I/I_0$ [dB]") if dB is False and sig0 is True: ax.set_ylabel(r"$\sigma_0$") if dB is False and sig0 is False: ax.set_ylabel(r"$I/I_0$") ax.set_xlabel(r"$\theta_0$ [deg]") # create the slider for the parameter paramslider = {} buttonlabels = [] for key, val in minparams.items(): # replace label of key with provided label if key in labels: keylabel = labels[key] else: keylabel = key buttonlabels += [keylabel] startval = startparams[key] paramslider[key] = Slider( paramaxes[key], # axes object for the slider keylabel, # name of the slider minparams[key], # minimal value maxparams[key], # maximal value startval, # initial value # valfmt="%i" # slider-value as integer color="gray", ) paramslider[key].label.set_position([0.05, 0.5]) paramslider[key].label.set_bbox( dict(boxstyle="round,pad=0.5", facecolor="w") ) paramslider[key].label.set_horizontalalignment("left") paramslider[key].valtext.set_position([0.8, 0.5]) buttons = CheckButtons(buttonax, buttonlabels, [False for i in buttonlabels]) params = startparams.copy() # define function to update lines based on slider-input def animate(value, key): # params = copy.deepcopy(startparams) params[key] = value modelresult = dict( zip(["tot", "surf", "vol", "inter"], fit.calc(params, inc=inc)) ) # convert to sig0 and dB if required for key, val in modelresult.items(): modelresult[key] = dBsig0convert( val[0], inc, dB, sig0, fit.dB, fit.sig0 ) # update the data ltot.set_ydata(modelresult["tot"].T) lsurf.set_ydata(modelresult["surf"].T) lvol.set_ydata(modelresult["vol"].T) if int_Q is True: lint.set_ydata(modelresult["inter"].T) # poverprint boundaries hatches = [r"//", r"\\ ", "+", "oo", "--", ".."] colors = ["C" + str(i) for i in range(10)] ax.collections.clear() legendhandles = [] for i, [key_i, key_Q] in enumerate(printvariationQ.items()): # replace label of key_i with provided label if key_i in labels: keylabel = labels[key_i] else: keylabel = key_i # reset color of text-backtround paramslider[key_i].label.get_bbox_patch().set_facecolor("w") if key_Q is True: # set color of text-background to hatch-color # paramslider[key_i].label.set_color(colors[i%len(colors)]) paramslider[key_i].label.get_bbox_patch().set_facecolor( colors[i % len(colors)] ) fillparams = params.copy() fillparams[key_i] = minparams[key_i] # don't use bsf=1 since no vegetation-term would be present if fillparams.get("bsf", 0.0) == 1.0: fillparams["bsf"] = 0.999 modelresultmin = dict( zip( ["tot", "surf", "vol", "inter"], fit.calc(fillparams, inc=inc), ) ) # convert to sig0 and dB if required for key, val in modelresultmin.items(): modelresultmin[key] = dBsig0convert( val[0], inc, dB, sig0, fit.dB, fit.sig0 ) fillparams[key_i] = maxparams[key_i] # don't use bsf=1 since no vegetation-term would be present if fillparams.get("bsf", 0.0) == 1.0: fillparams["bsf"] = 0.999 modelresultmax = dict( zip( ["tot", "surf", "vol", "inter"], fit.calc(fillparams, inc=inc), ) ) # convert to sig0 and dB if required for key, val in modelresultmax.items(): modelresultmax[key] = dBsig0convert( val[0], inc, dB, sig0, fit.dB, fit.sig0 ) legendhandles += [ ax.fill_between( inc, modelresultmax["tot"], modelresultmin["tot"], facecolor="none", hatch=hatches[i % len(hatches)], 
edgecolor=colors[i % len(colors)], label="total variability (" + keylabel + ")", ) ] if fillcomponents is True: legendhandles += [ ax.fill_between( inc, modelresultmax["surf"], modelresultmin["surf"], color="b", alpha=0.1, label="surf variability (" + keylabel + ")", ) ] legendhandles += [ ax.fill_between( inc, modelresultmax["vol"], modelresultmin["vol"], color="g", alpha=0.1, label="vol variability (" + keylabel + ")", ) ] if int_Q is True: legendhandles += [ ax.fill_between( inc, modelresultmax["inter"], modelresultmin["inter"], color="y", alpha=0.1, label="int variability (" + keylabel + ")", ) ] # a legend for the hatches leg1 = ax.legend( handles=legendhandles, labels=[i.get_label() for i in legendhandles], ) if len(legendhandles) == 0: leg1.remove() printvariationQ = {key: False for key in minparams} def buttonfunc(label): # if labels of the buttons have been changed by the labels-argument # set the name to the corresponding key (= the actual parameter name) for key, val in labels.items(): if label == val: label = key # ax.collections.clear() if printvariationQ[label] is True: ax.collections.clear() printvariationQ[label] = False elif printvariationQ[label] is False: printvariationQ[label] = True animate(paramslider[label].val, key=label) plt.draw() buttons.on_clicked(buttonfunc) for key, slider in paramslider.items(): slider.on_changed(partial(animate, key=key)) # define textboxes that allow changing the slider-boundaries bounds = dict( zip( minparams.keys(), np.array([list(minparams.values()), list(maxparams.values())]).T, ) ) def submit(val, key, minmax): slider = paramslider[key] if minmax == 0: bounds[key][0] = float(val) slider.valmin = float(val) slider.ax.set_xlim(slider.valmin, None) minparams[key] = float(val) if minmax == 1: bounds[key][1] = float(val) slider.valmax = float(val) slider.ax.set_xlim(None, slider.valmax) maxparams[key] = float(val) # call animate to update ranges animate(params[key], key=key) f.canvas.draw_idle() plt.draw() from matplotlib.widgets import TextBox textboxes_buttons = {} for i, [key, val] in enumerate(paramslider.items()): axbox0 = plt.axes( [ val.ax.get_position().x0, val.ax.get_position().y1, 0.05, 0.025, ] ) text_box0 = TextBox(axbox0, "", initial=str(round(bounds[key][0], 4))) text_box0.on_submit(partial(submit, key=key, minmax=0)) axbox1 = plt.axes( [ val.ax.get_position().x1 - 0.05, val.ax.get_position().y1, 0.05, 0.025, ] ) text_box1 = TextBox(axbox1, "", initial=str(round(bounds[key][1], 4))) text_box1.on_submit(partial(submit, key=key, minmax=1)) textboxes_buttons[key + "_min"] = text_box0 textboxes_buttons[key + "_max"] = text_box1 textboxes_buttons["buttons"] = buttons return f, paramslider, textboxes_buttons def intermediate_residuals( self, fit=None, grp="M", err="relerr", label_formatter=None, plottype="3D", iter_slice=slice(2, None), f_gs=None, colorbar=True, project_contour=False, fmt="%d.%m.%y %H:%M:%S", axtitle=None, cmap="coolwarm", ): """ generate a 2D or 3D plot of the intermediate residuals during the fit Parameters ---------- fit : rt1.rtfits.Fits object the rtfits object to use. grp : `str` or `tuple` the grouping to use, you can either provide - a `string` - if 'groups', the parameter-groups will be used - if 'dataset', the unique dataset-index values will be used - all other strings will be interpreted as a pandas datetime offset string (e.g. like 'D', 'M', '10D' etc.) 
- a `tuple` - it is interpreted as (key, values or bins) where the key must correspond to a column in fit.dataset or fit.res_df and the second entry is one of the following (see pandas.cut) - `int`, e.g. the number of bins to use - `array-like` e.g. the group-boundaries to use Note that only dynamic parameters can be used here since a constant can not be grouped! The default is 'M' err : str, optional - 'abserr' for absolute errors - 'relerr' for relative errors. The default is 'relerr'. label_formatter : callable, optional A formatter that will be applied to the group-column. See "matplotlib.ticker.FuncFormatter" for details. The default is None. plottype : str, optional either '2D' or '3D'. The default is '3D'. iter_slice : slice, optional a slice to display only a part of the iterations. The default is slice(2, None). f_gs : (matplotlib.figure, matpltolib.gridspec), optional a figure and gridspec object to be used instead of generating a new figure. The default is None. colorbar : bool, optional indicator if a colorbar should be generated. The default is True. project_contour : bool, optional only if plottype = '3D'. Indicator if the surface should be projected to the bottom or not. The default is False. fmt : str, optional only if grp is a string. the datetime-format used for the labels The default is '%d.%m.%y %H:%M:%S'. axtitle : str, optional The axes-label (if None, the provided key will be used) The default is None Returns ------- ax : matplotlib.axes the matplotlib axes used """ if fit is None: fit = self.fit try: fit.intermediate_results except AttributeError: assert False, ( "No intermediate results are found, you must run" + " performfit() with intermediate_results=True!" ) # get the step size of the iterator to correctly assign the labels if iter_slice and iter_slice.step: iterstep = iter_slice.step else: iterstep = 1 if isinstance(grp, tuple): if grp[0] in fit.res_df: grps = pd.cut( fit.res_df[grp[0]].reindex(fit.dataset.index), grp[1], include_lowest=True, ) elif grp[0] in fit.dataset: grps = pd.cut(fit.dataset[grp[0]], grp[1], include_lowest=True) grpvals = grps.cat.categories.mid.values ymin, ymax = grps.min().left, grps.max().right # group the residuals with respect to the defined group resarr = np.abs( [ pd.DataFrame(i[err], fit.dataset.index) .groupby(grps) .mean() .values.flatten() for i in fit.intermediate_results["residuals"][iter_slice] ] ) if axtitle is None: use_label = grp[0] else: use_label = axtitle elif isinstance(grp, str): if grp == "groups": # group the residuals with respect to the defined group grplabels = pd.to_datetime(fit.meandatetimes_group) resarr = np.abs( [ pd.DataFrame(i[err], fit.dataset.index) .groupby(fit._groupindex) .mean() .values.flatten() for i in fit.intermediate_results["residuals"][iter_slice] ] ) grpvals = np.arange(resarr.shape[1]) ymin, ymax = grpvals.min() - 0.5, grpvals.max() + 0.5 def label_formatter(x, pos=None): if x in grpvals: return grplabels[int(x)].strftime(fmt) else: return "" use_label = "" elif grp == "dataset": # group the residuals with respect to the dataset-index grplabels = pd.to_datetime(fit.index) resarr = np.abs( [ pd.DataFrame(i[err], fit.dataset.index) .groupby(level=0) .mean() .values.flatten() for i in fit.intermediate_results["residuals"][iter_slice] ] ) grpvals = np.arange(resarr.shape[1]) ymin, ymax = grpvals.min() - 0.5, grpvals.max() + 0.5 def label_formatter(x, pos=None): if x in grpvals: return grplabels[int(x)].strftime(fmt) else: return "" use_label = "" else: # group the residuals with respect to 
the defined group grplabels = ( pd.DataFrame( fit.intermediate_results["residuals"][0][err], fit.dataset.index, ) .groupby(pd.Grouper(freq=grp)) .mean() .index ) resarr = np.abs( [ pd.DataFrame(i[err], fit.dataset.index) .groupby(pd.Grouper(freq=grp)) .mean() .values.flatten() for i in fit.intermediate_results["residuals"][iter_slice] ] ) grpvals = np.arange(resarr.shape[1]) ymin, ymax = grpvals.min() - 0.5, grpvals.max() + 0.5 def label_formatter(x, pos=None): if x in grpvals: return grplabels[int(x)].strftime(fmt) else: return "" use_label = "" # mask for nan-values resarr = np.ma.masked_array(resarr, np.isnan(resarr)) xvals = np.arange(1, len(fit.intermediate_results["residuals"]) + 1)[iter_slice] yvals = np.sort(pd.unique(grpvals)) if plottype == "3D": # generate a 3D surface plot Y, X = np.meshgrid(yvals, xvals) if f_gs is None: fig = plt.figure(figsize=(10, 8)) ax = fig.add_subplot(111, projection="3d") else: fig = f_gs[0] ax = fig.add_subplot(f_gs[1], projection="3d") ax.grid() for a in (ax.xaxis, ax.yaxis, ax.zaxis): a.pane.set_color("none") if project_contour: # project a contour-plot to the bottom ax.contourf( X, Y, resarr, zdir="z", offset=-(np.nanmax(resarr) - np.nanmin(resarr)) * 0.1, cmap=plt.cm.coolwarm, alpha=0.5, extent=[xvals.min() - iterstep, xvals.max(), ymin, ymax], origin="lower", ) ax.set_zlim( np.nanmin(resarr) - (np.nanmax(resarr) - np.nanmin(resarr)) * 0.1, np.nanmax(resarr), ) # plot a 3D surface surf = ax.plot_surface( X, Y, resarr, cmap=cmap, linewidth=0, antialiased=True, vmin=np.nanmin(resarr), vmax=np.nanmax(resarr), rcount=500, ccount=500, alpha=1, ) # find points that are surrounded by a mask and # plot a connection-line and scatterpoints maskp = [ i == (True, False, True) for i in pairwise([True, *np.all(resarr.mask, axis=0), True], 3) ] for i in np.arange(len(maskp))[maskp]: ax.plot_wireframe( X.T[[i]], Y.T[[i]], resarr.T[[i]], linewidth=0.25, antialiased=False, linestyle=(0, (10, 10)), alpha=0.25, color="k", ) surf = ax.scatter( X.T[i], Y.T[i], resarr.T[i], c=resarr.T[i], cmap=cmap, vmin=np.nanmin(resarr), vmax=np.nanmax(resarr), ) # incorporate the ticker if provided if label_formatter is not None: ax.yaxis.set_major_formatter(ticker.FuncFormatter(label_formatter)) elif plottype == "2D": # generate a 2D imshow plot if f_gs is None: fig = plt.figure(figsize=(10, 8)) ax = fig.add_subplot(111) else: fig = f_gs[0] ax = f_gs[0].add_subplot(f_gs[1]) surf = ax.imshow( resarr.T, cmap=cmap, vmin=np.nanmin(resarr), vmax=np.nanmax(resarr), extent=[ xvals.min() - 0.5 * iterstep, xvals.max() + 0.5 * iterstep, ymin, ymax, ], origin="lower", aspect="auto", ) def xformatter(x, pos=None): if x in xvals: return int(x) else: return "" # show only relevant tick-labels on x-axis ax.xaxis.set_major_formatter(ticker.FuncFormatter(xformatter)) if label_formatter is not None: ax.yaxis.set_major_formatter(ticker.FuncFormatter(label_formatter)) ax.set_ylim(ymin, ymax) if colorbar is True: cb = plt.colorbar(surf) if err == "abserr": cb.set_label(r"Absolute error $(x_{fit} - x_{data})$") if err == "relerr": cb.set_label( r"Relative error $\frac{(x_{fit} - x_{data})}{x_{data}}$" ) ax.set_ylabel(use_label) ax.set_xlabel("# fit-iteration") fig.tight_layout() return fig
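# ---------------------------------------------------------------------------
# Minimal usage sketch for the plotting methods defined above, assuming the
# class in this module is named `plot` and is driven from a fitted
# rt1.rtfits.Fits object; the import path, the call to performfit() and the
# variable names are illustrative assumptions and may differ in the actual
# rt1 package.
#
#     from rt1.rtfits import Fits
#
#     fit = Fits(...)                            # configure model & dataset
#     fit.performfit(intermediate_results=True)  # required for the
#                                                # intermediate_* plots
#
#     pl = plot(fit=fit)                         # the class defined above
#     f1 = pl.scatter()                          # modelled vs. measured sig0
#     f2 = pl.results(legend=True)               # fitted curves + parameters
#     refs = pl.printsig0analysis(range1=1)      # keep the returned references
#                                                # so the widget stays responsive
#
# A small self-contained sketch of the sigma0 <-> intensity relation quoted
# in the docstrings (sig0 = 4 * pi * cos(theta) * I); this mirrors, but is
# not identical to, the package's own dBsig0convert helper:
#
#     import numpy as np
#
#     def to_sig0_dB(I_lin, theta):
#         # convert a linear intensity value to sigma0 in dB
#         return 10.0 * np.log10(4.0 * np.pi * np.cos(theta) * I_lin)
#
#     print(to_sig0_dB(1e-3, np.deg2rad(35)))    # approx. -19.9 dB
# ---------------------------------------------------------------------------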
apache-2.0
glennq/scikit-learn
sklearn/model_selection/_validation.py
3
37522
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings import numbers import time import numpy as np import scipy.sparse as sp from ..base import is_classifier, clone from ..utils import indexable, check_random_state, safe_indexing from ..utils.fixes import astype from ..utils.validation import _is_arraylike, _num_samples from ..utils.metaestimators import _safe_split from ..externals.joblib import Parallel, delayed, logger from ..metrics.scorer import check_scoring from ..exceptions import FitFailedWarning from ._split import check_cv __all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. 
Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS [ 0.33150734 0.08022311 0.03531764] See Also --------- :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv.split(X, y, groups)) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. score_time : float Time spent for scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = '' else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)") else: fit_time = time.time() - start_time test_score = _score(estimator, X_test, y_test, scorer) score_time = time.time() - start_time - fit_time if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(score_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score, test_score] if return_train_score else [test_score] if return_n_test_samples: ret.append(_num_samples(X_test)) if return_times: ret.extend([fit_time, score_time]) if return_parameters: ret.append(parameters) return ret def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: # e.g. unwrap memmapped scalars score = score.item() except ValueError: # non-scalar? pass if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', method='predict'): """Generate cross-validated estimates for each input data point Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. 
For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' method : string, optional, default: 'predict' Invokes the passed method name of the passed estimator. Returns ------- predictions : ndarray This is the result of calling ``method`` Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y) """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) # Ensure the estimator has implemented the passed decision function if not callable(getattr(estimator, method)): raise AttributeError('{} not implemented in estimator' .format(method)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) prediction_blocks = parallel(delayed(_fit_and_predict)( clone(estimator), X, y, train, test, verbose, fit_params, method) for train, test in cv.split(X, y, groups)) # Concatenate the predictions predictions = [pred_block_i for pred_block_i, _ in prediction_blocks] test_indices = np.concatenate([indices_i for _, indices_i in prediction_blocks]) if not _check_is_permutation(test_indices, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') inv_test_indices = np.empty(len(test_indices), dtype=int) inv_test_indices[test_indices] = np.arange(len(test_indices)) # Check for sparse predictions if sp.issparse(predictions[0]): predictions = sp.vstack(predictions, format=predictions[0].format) else: predictions = np.concatenate(predictions) return predictions[inv_test_indices] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params, method): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. 
fit_params : dict or None Parameters that will be passed to ``estimator.fit``. method : string Invokes the passed method name of the passed estimator. Returns ------- predictions : sequence Result of calling 'estimator.method' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) func = getattr(estimator, method) predictions = func(X_test) return predictions, test def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray integer array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, dtype=bool) hit[indices] = True if not np.all(hit): return False return True def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def permutation_test_score(estimator, X, y, groups=None, cv=None, n_permutations=100, n_jobs=1, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), optional Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. 
random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def _permutation_test_score(estimator, X, y, groups, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv.split(X, y, groups): estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, groups, random_state): """Return a shuffled copy of y eventually shuffle among same groups.""" if groups is None: indices = random_state.permutation(len(y)) else: indices = np.arange(len(groups)) for group in np.unique(groups): this_mask = (groups == group) indices[this_mask] = random_state.permutation(indices[this_mask]) return y[indices] def learning_curve(estimator, X, y, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch="all", verbose=0, shuffle=False, random_state=None): """Learning curve. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. 
groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : boolean, optional, default: False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. shuffle : boolean, optional Whether to shuffle training data before taking prefixes of it based on``train_sizes``. random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. ------- train_sizes_abs : array, shape = (n_unique_ticks,), dtype int Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. 
Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) cv_iter = cv.split(X, y, groups) # Make a list since we will be iterating multiple times over the folds cv_iter = list(cv_iter) scorer = check_scoring(estimator, scoring=scoring) n_max_training_samples = len(cv_iter[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if shuffle: rng = check_random_state(random_state) cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose) for train, test in cv_iter) else: train_test_proportions = [] for train, test in cv_iter: for n_train_samples in train_sizes_abs: train_test_proportions.append((train[:n_train_samples], test)) out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=None, return_train_score=True) for train, test in train_test_proportions) out = np.array(out) n_cv_folds = out.shape[0] // n_unique_ticks out = out.reshape(n_cv_folds, n_unique_ticks, 2) out = np.asarray(out).transpose((2, 1, 0)) return train_sizes_abs, out[0], out[1] def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like, shape (n_ticks,), dtype float or int Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array, shape (n_unique_ticks,), dtype int Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.float): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." 
% (n_min_required_samples, n_max_required_samples)) train_sizes_abs = astype(train_sizes_abs * n_max_training_samples, dtype=np.int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." % (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than the size of " "'train_sizes' %d instead of %d)." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning) return train_sizes_abs def _incremental_fit_estimator(estimator, X, y, classes, train, test, train_sizes, scorer, verbose): """Train estimator on training subsets incrementally and compute scores.""" train_scores, test_scores = [], [] partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) for n_train_samples, partial_train in partitions: train_subset = train[:n_train_samples] X_train, y_train = _safe_split(estimator, X, y, train_subset) X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) X_test, y_test = _safe_split(estimator, X, y, test, train_subset) if y_partial_train is None: estimator.partial_fit(X_partial_train, classes=classes) else: estimator.partial_fit(X_partial_train, y_partial_train, classes=classes) train_scores.append(_score(estimator, X_train, y_train, scorer)) test_scores.append(_score(estimator, X_test, y_test, scorer)) return np.array((train_scores, test_scores)).T def validation_curve(estimator, X, y, param_name, param_range, groups=None, cv=None, scoring=None, n_jobs=1, pre_dispatch="all", verbose=0): """Validation curve. Determine training and test scores for varying parameter values. Compute scores for an estimator with different values of a specified parameter. This is similar to grid search with one parameter. However, this will also compute training scores and is merely a utility for plotting the results. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. param_name : string Name of the parameter that will be varied. param_range : array-like, shape (n_values,) The values of the parameter that will be evaluated. groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. 
Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) out = parallel(delayed(_fit_and_score)( estimator, X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=None, return_train_score=True) for train, test in cv.split(X, y, groups) for v in param_range) out = np.asarray(out) n_params = len(param_range) n_cv_folds = out.shape[0] // n_params out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0)) return out[0], out[1]
bsd-3-clause
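A minimal usage sketch of the cross-validation helpers documented in the record above, assuming the same sklearn.model_selection API that the file defines (the dataset and estimator mirror the cross_val_score docstring example):

import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, cross_val_predict

# Small regression problem, as in the cross_val_score docstring.
diabetes = datasets.load_diabetes()
X, y = diabetes.data[:150], diabetes.target[:150]
lasso = linear_model.Lasso()

# Cross-validated scores, one per fold (cv=None falls back to 3-fold here).
scores = cross_val_score(lasso, X, y)
print("mean CV score: %.3f" % np.mean(scores))

# Out-of-fold predictions, re-ordered to match the input samples.
y_pred = cross_val_predict(lasso, X, y)
print(y_pred.shape)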
xulesc/pytsa
model_maker.py
1
3953
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 14 18:37:21 2014

@author: xule
"""
##
# -*- coding: utf-8 -*-
"""
Created on Thu Jun  5 09:12:15 2014

@author: anuj
"""
## change matplotlib to use X-backend
#import matplotlib
#matplotlib.use('Agg')

import csv
from time import time
import cPickle
from sklearn import preprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import GaussianNB
#from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from collections import defaultdict

trg_file = 'straining.1600000.processed.noemoticon.utf8.csv'
senti_data = 'model/SentiWordNet_3.0.0_20130122.txt'
## MODEL FILES ##
classifier_file = 'model/nb/classifier.pkl'
vectorizer_file = 'model/nb/vectorizer.pkl'
lsa_file = 'model/nb/lsa.pkl'
senti_file = 'model/senti.pkl'
##
min_max_scaler = preprocessing.MinMaxScaler()
csv_reader = lambda x: csv.reader(open(x, 'rb'), delimiter=',', quotechar='"')

def read_data(reader, X_col = 5, Y_col = 0):
    return zip(*map(lambda x: (x[X_col], int(x[Y_col])/4), reader))

def prep_fv(Xin, vectorizer, lsa):
    return vectorizer.fit_transform(Xin).todense()

def cm_score(cm):
    a, b = cm[0]; c, d = cm[1]
    precision = 1.0 * a / (a + b)
    npv = 1.0 * d / (c + d)
    sensitivity = 1.0 * a / (a + c)
    specificity = 1.0 * d / (b + d)
    acc = 1.0 * (a + d) / (a + b + c + d)
    return 'precision = %f, npv = %f, sensitivity = %f, specificity = %f, acc = %f'%(precision,npv,sensitivity,specificity,acc)
##
t0 = time()
clf = GaussianNB()
vectorizer = TfidfVectorizer(max_df=0.5, max_features=100,
                             stop_words='english', use_idf=True)
lsa = TruncatedSVD(2)
## TRAINING ##
print("Training")
X_trg, Y_trg = read_data(csv_reader(trg_file))
X = prep_fv(X_trg, vectorizer, lsa)
#pl.boxplot(X)
clf.fit(X, Y_trg)
## Saving ##
print("Saving model")
cPickle.dump(clf, open(classifier_file, 'wb'))
cPickle.dump(vectorizer, open(vectorizer_file, 'wb'))
cPickle.dump(lsa, open(lsa_file, 'wb'))
## Loading ##
print("Loading model")
clf = cPickle.load(open(classifier_file, 'rb'))
vectorizer = cPickle.load(open(vectorizer_file, 'rb'))
lsa = cPickle.load(open(lsa_file, 'rb'))
## TESTING ##
print("Testing")
X_tst, Y_tst = read_data(csv_reader(trg_file))
T = prep_fv(X_tst, vectorizer, lsa)
cm = confusion_matrix(clf.predict(T), Y_tst)
print(cm_score(cm))
# Compute Precision-Recall and plot curve
probas_ = clf.predict_proba(T)
precision, recall, thresholds = precision_recall_curve(Y_tst, probas_[:, 1])
area = auc(recall, precision)
print("Area Under Curve: %0.2f" % area)
##
senti_model = defaultdict(list)
for line in open(senti_data, 'r'):
    if line.find('#') == 0:
        continue
    data = line.split('\t')
    if len(data[0].strip()) == 0:
        continue
    p_score = float(data[2])
    n_score = float(data[3])
    words = map(lambda a: a[:a.index('#')], data[4].split(' '))
    for word in words:
        senti_model[word].append([p_score, n_score])
cPickle.dump(senti_model, open(senti_file, 'wb'))
##
#Integer utermPaperCount = positiveUniverseTermCounts.get(term);
#if (utermPaperCount != null) {
#    final double a = Math.exp(Math.log(termPaperCount)
#            - Math.log(paperUniverseCount));
#    final double b = Math.exp(Math.log(termPaperCount)
#            + Math.log(commonPaperCount) - 2
#            * Math.log(paperUniverseCount));
#    final double d = Math.exp(Math.log(a - b) - 0.5 * Math.log(b));
#    final double f = (Math.log(termPaperCount) / Math.log(2) + Math
#            .log(paperUniverseCount) / Math.log(2))
#            - (Math.log(utermPaperCount) / Math.log(2) + Math
#            .log(commonPaperCount) / Math.log(2));
#    if (Math.abs(d) > 0.15)
#        positivePMI += d * f;
#    log.trace("positivePMI: " + positivePMI);
#    continue;
#}
##
gpl-3.0
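A condensed, self-contained sketch of the building blocks the script above instantiates (TF-IDF features, a 2-component TruncatedSVD, Gaussian Naive Bayes, and precision-recall evaluation); the tiny in-memory corpus and 0/1 labels are stand-ins for the Sentiment140 CSV the script actually reads:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, precision_recall_curve, auc

# Toy sentiment corpus: 1 = positive, 0 = negative.
texts = ["good great fun", "awful terrible boring", "really great movie",
         "boring and bad", "great acting", "terrible plot"]
labels = np.array([1, 0, 1, 0, 1, 0])

# TF-IDF features projected onto 2 latent components.
tfidf = TfidfVectorizer().fit_transform(texts).toarray()
X = TruncatedSVD(n_components=2).fit_transform(tfidf)

clf = GaussianNB().fit(X, labels)

print(confusion_matrix(labels, clf.predict(X)))
precision, recall, _ = precision_recall_curve(labels, clf.predict_proba(X)[:, 1])
print("PR AUC: %.2f" % auc(recall, precision))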
glennq/scikit-learn
benchmarks/bench_tree.py
119
3647
""" To run this, you'll need to have installed. * scikit-learn Does two benchmarks First, we fix a training set, increase the number of samples to classify and plot number of classified samples as a function of time. In the second benchmark, we increase the number of dimensions of the training set, classify a sample and plot the time taken as a function of the number of dimensions. """ import numpy as np import matplotlib.pyplot as plt import gc from datetime import datetime # to store the results scikit_classifier_results = [] scikit_regressor_results = [] mu_second = 0.0 + 10 ** 6 # number of microseconds in a second def bench_scikit_tree_classifier(X, Y): """Benchmark with scikit-learn decision tree classifier""" from sklearn.tree import DecisionTreeClassifier gc.collect() # start time tstart = datetime.now() clf = DecisionTreeClassifier() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_classifier_results.append( delta.seconds + delta.microseconds / mu_second) def bench_scikit_tree_regressor(X, Y): """Benchmark with scikit-learn decision tree regressor""" from sklearn.tree import DecisionTreeRegressor gc.collect() # start time tstart = datetime.now() clf = DecisionTreeRegressor() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_regressor_results.append( delta.seconds + delta.microseconds / mu_second) if __name__ == '__main__': print('============================================') print('Warning: this is going to take a looong time') print('============================================') n = 10 step = 10000 n_samples = 10000 dim = 10 n_classes = 10 for i in range(n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') n_samples += step X = np.random.randn(n_samples, dim) Y = np.random.randint(0, n_classes, (n_samples,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(n_samples) bench_scikit_tree_regressor(X, Y) xx = range(0, n * step, step) plt.figure('scikit-learn tree benchmark results') plt.subplot(211) plt.title('Learning with varying number of samples') plt.plot(xx, scikit_classifier_results, 'g-', label='classification') plt.plot(xx, scikit_regressor_results, 'r-', label='regression') plt.legend(loc='upper left') plt.xlabel('number of samples') plt.ylabel('Time (s)') scikit_classifier_results = [] scikit_regressor_results = [] n = 10 step = 500 start_dim = 500 n_classes = 10 dim = start_dim for i in range(0, n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') dim += step X = np.random.randn(100, dim) Y = np.random.randint(0, n_classes, (100,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(100) bench_scikit_tree_regressor(X, Y) xx = np.arange(start_dim, start_dim + n * step, step) plt.subplot(212) plt.title('Learning in high dimensional spaces') plt.plot(xx, scikit_classifier_results, 'g-', label='classification') plt.plot(xx, scikit_regressor_results, 'r-', label='regression') plt.legend(loc='upper left') plt.xlabel('number of dimensions') plt.ylabel('Time (s)') plt.axis('tight') plt.show()
bsd-3-clause
ChampionZP/DeepLearningImplementations
Colorful/src/model/eval_colorful.py
2
1651
import os
import sys

import numpy as np

import models_colorful
import sklearn.neighbors as nn

# Utils
sys.path.append("../utils")
import batch_utils
import general_utils


def eval(**kwargs):

    data_file = kwargs["data_file"]
    model_name = kwargs["model_name"]
    epoch = kwargs["epoch"]
    T = kwargs["T"]
    batch_size = kwargs["batch_size"]
    nb_neighbors = kwargs["nb_neighbors"]
    img_size = int(os.path.basename(data_file).split("_")[1])

    # Create a batch generator for the color data
    DataGen = batch_utils.DataGenerator(data_file,
                                        batch_size=batch_size,
                                        dset="training")
    c, h, w = DataGen.get_config()["data_shape"][1:]

    # Load the array of quantized ab value
    q_ab = np.load("../../data/processed/pts_in_hull.npy")
    nb_q = q_ab.shape[0]
    # Fit a NN to q_ab
    nn_finder = nn.NearestNeighbors(n_neighbors=nb_neighbors,
                                    algorithm='ball_tree').fit(q_ab)

    # Load the color prior factor that encourages rare colors
    prior_factor = np.load("../../data/processed/CelebA_%s_prior_factor.npy" % img_size)

    # Load colorization model
    color_model = models_colorful.load(model_name, nb_q, (1, h, w), batch_size)
    color_model.load_weights("../../models/%s/%s_weights_epoch%s.h5" %
                             (model_name, model_name, epoch))

    for batch in DataGen.gen_batch(nn_finder, nb_q, prior_factor):

        X_batch_black, X_batch_color, Y_batch = batch

        general_utils.plot_batch_eval(color_model, q_ab,
                                      X_batch_black, X_batch_color,
                                      batch_size, h, w, nb_q, T)
mit
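A minimal sketch of the nearest-neighbour lookup the evaluation script above relies on: fitting sklearn's NearestNeighbors on a set of quantized (a, b) colour bins and querying the closest bins for arbitrary ab values. The real pts_in_hull.npy gamut file is replaced here by a random stand-in array:

import numpy as np
import sklearn.neighbors as nn

# Stand-in for pts_in_hull.npy: a fake set of 313 quantized ab colour bins.
rng = np.random.RandomState(0)
q_ab = rng.uniform(-110, 110, size=(313, 2))

nb_neighbors = 5
nn_finder = nn.NearestNeighbors(n_neighbors=nb_neighbors,
                                algorithm="ball_tree").fit(q_ab)

# Query the closest colour bins (and distances) for a batch of ab pixel values.
ab_pixels = rng.uniform(-110, 110, size=(4, 2))
dists, idx = nn_finder.kneighbors(ab_pixels)
print(idx.shape)  # (4, 5): five candidate bins per pixel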
pianomania/scikit-learn
examples/covariance/plot_covariance_estimation.py
97
5074
""" ======================================================================= Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood ======================================================================= When working with covariance estimation, the usual approach is to use a maximum likelihood estimator, such as the :class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it converges to the true (population) covariance when given many observations. However, it can also be beneficial to regularize it, in order to reduce its variance; this, in turn, introduces some bias. This example illustrates the simple regularization used in :ref:`shrunk_covariance` estimators. In particular, it focuses on how to set the amount of regularization, i.e. how to choose the bias-variance trade-off. Here we compare 3 approaches: * Setting the parameter by cross-validating the likelihood on three folds according to a grid of potential shrinkage parameters. * A close formula proposed by Ledoit and Wolf to compute the asymptotically optimal regularization parameter (minimizing a MSE criterion), yielding the :class:`sklearn.covariance.LedoitWolf` covariance estimate. * An improvement of the Ledoit-Wolf shrinkage, the :class:`sklearn.covariance.OAS`, proposed by Chen et al. Its convergence is significantly better under the assumption that the data are Gaussian, in particular for small samples. To quantify estimation error, we plot the likelihood of unseen data for different values of the shrinkage parameter. We also show the choices by cross-validation, or with the LedoitWolf and OAS estimates. Note that the maximum likelihood estimate corresponds to no shrinkage, and thus performs poorly. The Ledoit-Wolf estimate performs really well, as it is close to the optimal and is computational not costly. In this example, the OAS estimate is a bit further away. Interestingly, both approaches outperform cross-validation, which is significantly most computationally costly. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import linalg from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \ log_likelihood, empirical_covariance from sklearn.model_selection import GridSearchCV ############################################################################### # Generate sample data n_features, n_samples = 40, 20 np.random.seed(42) base_X_train = np.random.normal(size=(n_samples, n_features)) base_X_test = np.random.normal(size=(n_samples, n_features)) # Color samples coloring_matrix = np.random.normal(size=(n_features, n_features)) X_train = np.dot(base_X_train, coloring_matrix) X_test = np.dot(base_X_test, coloring_matrix) ############################################################################### # Compute the likelihood on test data # spanning a range of possible shrinkage coefficient values shrinkages = np.logspace(-2, 0, 30) negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test) for s in shrinkages] # under the ground-truth model, which we would not have access to in real # settings real_cov = np.dot(coloring_matrix.T, coloring_matrix) emp_cov = empirical_covariance(X_train) loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov)) ############################################################################### # Compare different approaches to setting the parameter # GridSearch for an optimal shrinkage coefficient tuned_parameters = [{'shrinkage': shrinkages}] cv = GridSearchCV(ShrunkCovariance(), tuned_parameters) cv.fit(X_train) # Ledoit-Wolf optimal shrinkage coefficient estimate lw = LedoitWolf() loglik_lw = lw.fit(X_train).score(X_test) # OAS coefficient estimate oa = OAS() loglik_oa = oa.fit(X_train).score(X_test) ############################################################################### # Plot results fig = plt.figure() plt.title("Regularized covariance: likelihood and shrinkage coefficient") plt.xlabel('Regularizaton parameter: shrinkage coefficient') plt.ylabel('Error: negative log-likelihood on test data') # range shrinkage curve plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood") plt.plot(plt.xlim(), 2 * [loglik_real], '--r', label="Real covariance likelihood") # adjust view lik_max = np.amax(negative_logliks) lik_min = np.amin(negative_logliks) ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0])) ymax = lik_max + 10. * np.log(lik_max - lik_min) xmin = shrinkages[0] xmax = shrinkages[-1] # LW likelihood plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta', linewidth=3, label='Ledoit-Wolf estimate') # OAS likelihood plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple', linewidth=3, label='OAS estimate') # best CV estimator likelihood plt.vlines(cv.best_estimator_.shrinkage, ymin, -cv.best_estimator_.score(X_test), color='cyan', linewidth=3, label='Cross-validation best estimate') plt.ylim(ymin, ymax) plt.xlim(xmin, xmax) plt.legend() plt.show()
bsd-3-clause
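A compact sketch of the comparison the example above draws, without the plotting: the Ledoit-Wolf and OAS closed-form shrinkage coefficients versus a cross-validated ShrunkCovariance, all fit on a small random sample:

import numpy as np
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(42)
X = rng.normal(size=(30, 20))

lw = LedoitWolf().fit(X)
oa = OAS().fit(X)
cv = GridSearchCV(ShrunkCovariance(),
                  [{'shrinkage': np.logspace(-2, 0, 30)}]).fit(X)

print("Ledoit-Wolf shrinkage: %.3f" % lw.shrinkage_)
print("OAS shrinkage:         %.3f" % oa.shrinkage_)
print("CV-selected shrinkage: %.3f" % cv.best_estimator_.shrinkage)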
kissmetrics/spark
python/pyspark/ml/base.py
45
3839
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABCMeta, abstractmethod

from pyspark import since
from pyspark.ml.param import Params
from pyspark.ml.common import inherit_doc


@inherit_doc
class Estimator(Params):
    """
    Abstract class for estimators that fit models to data.

    .. versionadded:: 1.3.0
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def _fit(self, dataset):
        """
        Fits a model to the input dataset. This is called by the default implementation of fit.

        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
        :returns: fitted model
        """
        raise NotImplementedError()

    @since("1.3.0")
    def fit(self, dataset, params=None):
        """
        Fits a model to the input dataset with optional parameters.

        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
        :param params: an optional param map that overrides embedded params. If a list/tuple of
                       param maps is given, this calls fit on each param map and returns a list of
                       models.
        :returns: fitted model(s)
        """
        if params is None:
            params = dict()
        if isinstance(params, (list, tuple)):
            return [self.fit(dataset, paramMap) for paramMap in params]
        elif isinstance(params, dict):
            if params:
                return self.copy(params)._fit(dataset)
            else:
                return self._fit(dataset)
        else:
            raise ValueError("Params must be either a param map or a list/tuple of param maps, "
                             "but got %s." % type(params))


@inherit_doc
class Transformer(Params):
    """
    Abstract class for transformers that transform one dataset into another.

    .. versionadded:: 1.3.0
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def _transform(self, dataset):
        """
        Transforms the input dataset.

        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
        :returns: transformed dataset
        """
        raise NotImplementedError()

    @since("1.3.0")
    def transform(self, dataset, params=None):
        """
        Transforms the input dataset with optional parameters.

        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
        :param params: an optional param map that overrides embedded params.
        :returns: transformed dataset
        """
        if params is None:
            params = dict()
        if isinstance(params, dict):
            if params:
                return self.copy(params)._transform(dataset)
            else:
                return self._transform(dataset)
        else:
            raise ValueError("Params must be a param map but got %s." % type(params))


@inherit_doc
class Model(Transformer):
    """
    Abstract class for models that are fitted by estimators.

    .. versionadded:: 1.4.0
    """

    __metaclass__ = ABCMeta
apache-2.0
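A brief sketch of the Estimator/Model/Transformer contract defined in the record above, using a standard PySpark ML estimator; it assumes a working Spark installation and an active SparkSession, and follows the usual quick-start pattern rather than anything specific to base.py:

from pyspark.sql import SparkSession
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.linalg import Vectors

spark = SparkSession.builder.appName("estimator-demo").getOrCreate()

# A toy DataFrame with the conventional (label, features) columns.
training = spark.createDataFrame([
    (1.0, Vectors.dense(0.0, 1.1, 0.1)),
    (0.0, Vectors.dense(2.0, 1.0, -1.0)),
    (0.0, Vectors.dense(2.0, 1.3, 1.0)),
    (1.0, Vectors.dense(0.0, 1.2, -0.5))], ["label", "features"])

# LogisticRegression is an Estimator: fit() returns a Model, which is a Transformer.
lr = LogisticRegression(maxIter=10, regParam=0.01)
model = lr.fit(training)

# A param map passed to fit() overrides embedded params, as implemented in base.py.
model2 = lr.fit(training, {lr.maxIter: 30})

model.transform(training).select("label", "prediction").show()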
yyjiang/scikit-learn
sklearn/linear_model/tests/test_theil_sen.py
233
9928
""" Testing for Theil-Sen module (sklearn.linear_model.theil_sen) """ # Author: Florian Wilhelm <florian.wilhelm@gmail.com> # License: BSD 3 clause from __future__ import division, print_function, absolute_import import os import sys from contextlib import contextmanager import numpy as np from numpy.testing import assert_array_equal, assert_array_less from numpy.testing import assert_array_almost_equal, assert_warns from scipy.linalg import norm from scipy.optimize import fmin_bfgs from nose.tools import raises, assert_almost_equal from sklearn.utils import ConvergenceWarning from sklearn.linear_model import LinearRegression, TheilSenRegressor from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point from sklearn.linear_model.theil_sen import _modified_weiszfeld_step from sklearn.utils.testing import assert_greater, assert_less @contextmanager def no_stdout_stderr(): old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = open(os.devnull, 'w') sys.stderr = open(os.devnull, 'w') yield sys.stdout.flush() sys.stderr.flush() sys.stdout = old_stdout sys.stderr = old_stderr def gen_toy_problem_1d(intercept=True): random_state = np.random.RandomState(0) # Linear model y = 3*x + N(2, 0.1**2) w = 3. if intercept: c = 2. n_samples = 50 else: c = 0.1 n_samples = 100 x = random_state.normal(size=n_samples) noise = 0.1 * random_state.normal(size=n_samples) y = w * x + c + noise # Add some outliers if intercept: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[33], y[33] = (2.5, 1) x[49], y[49] = (2.1, 2) else: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[53], y[53] = (2.5, 1) x[60], y[60] = (2.1, 2) x[72], y[72] = (1.8, -7) return x[:, np.newaxis], y, w, c def gen_toy_problem_2d(): random_state = np.random.RandomState(0) n_samples = 100 # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 2)) w = np.array([5., 10.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def gen_toy_problem_4d(): random_state = np.random.RandomState(0) n_samples = 10000 # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 4)) w = np.array([5., 10., 42., 7.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def test_modweiszfeld_step_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) # Check startvalue is element of X and solution median = 2. new_y = _modified_weiszfeld_step(X, median) assert_array_almost_equal(new_y, median) # Check startvalue is not the solution y = 2.5 new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check startvalue is not the solution but element of X y = 3. 
new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check that a single vector is identity X = np.array([1., 2., 3.]).reshape(1, 3) y = X[0, ] new_y = _modified_weiszfeld_step(X, y) assert_array_equal(y, new_y) def test_modweiszfeld_step_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) y = np.array([0.5, 0.5]) # Check first two iterations new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) new_y = _modified_weiszfeld_step(X, new_y) assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) # Check fix point y = np.array([0.21132505, 0.78867497]) new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, y) def test_spatial_median_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) true_median = 2. _, median = _spatial_median(X) assert_array_almost_equal(median, true_median) # Test larger problem and for exact solution in 1d case random_state = np.random.RandomState(0) X = random_state.randint(100, size=(1000, 1)) true_median = np.median(X.ravel()) _, median = _spatial_median(X) assert_array_equal(median, true_median) def test_spatial_median_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) _, median = _spatial_median(X, max_iter=100, tol=1.e-6) def cost_func(y): dists = np.array([norm(x - y) for x in X]) return np.sum(dists) # Check if median is solution of the Fermat-Weber location problem fermat_weber = fmin_bfgs(cost_func, median, disp=False) assert_array_almost_equal(median, fermat_weber) # Check when maximum iteration is exceeded a warning is emitted assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.) def test_theil_sen_1d(): X, y, w, c = gen_toy_problem_1d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(np.abs(lstq.coef_ - w), 0.9) # Check that Theil-Sen works theil_sen = TheilSenRegressor(random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_theil_sen_1d_no_intercept(): X, y, w, c = gen_toy_problem_1d(intercept=False) # Check that Least Squares fails lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_greater(np.abs(lstq.coef_ - w - c), 0.5) # Check that Theil-Sen works theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w + c, 1) assert_almost_equal(theil_sen.intercept_, 0.) 
def test_theil_sen_2d(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_calc_breakdown_point(): bp = _breakdown_point(1e10, 2) assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6) @raises(ValueError) def test_checksubparams_negative_subpopulation(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_few_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_many_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_n_subsamples_if_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y) def test_subpopulation(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_subsamples(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) lstq = LinearRegression().fit(X, y) # Check for exact the same results as Least Squares assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) def test_verbosity(): X, y, w, c = gen_toy_problem_1d() # Check that Theil-Sen can be verbose with no_stdout_stderr(): TheilSenRegressor(verbose=True, random_state=0).fit(X, y) TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) def test_theil_sen_parallel(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(n_jobs=-1, random_state=0, max_subpopulation=2e3).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) # Check that Theil-Sen falls back to Least Squares if fit_intercept=False theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) # Check fit_intercept=True case. This will not be equal to the Least # Squares solution since the intercept is calculated differently. theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) y_pred = theil_sen.predict(X) assert_array_almost_equal(y_pred, y, 12)
bsd-3-clause
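A short sketch of the robust-regression behaviour the tests above exercise: TheilSenRegressor staying close to the true slope while ordinary least squares is pulled away by a handful of outliers (the toy data here is a simplified version of the tests' gen_toy_problem_1d):

import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor

rng = np.random.RandomState(0)
x = rng.normal(size=50)
y = 3. * x + 2. + 0.1 * rng.normal(size=50)
y[:5] = 20.               # a few gross outliers
X = x[:, np.newaxis]

lstq = LinearRegression().fit(X, y)
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)

print("OLS slope:       %.2f" % lstq.coef_[0])        # pulled away by the outliers
print("Theil-Sen slope: %.2f" % theil_sen.coef_[0])   # should stay near the true 3.0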
cainiaocome/scikit-learn
sklearn/externals/joblib/parallel.py
36
34375
""" Helpers for embarrassingly parallel code. """ # Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org > # Copyright: 2010, Gael Varoquaux # License: BSD 3 clause from __future__ import division import os import sys import gc import warnings from math import sqrt import functools import time import threading import itertools from numbers import Integral try: import cPickle as pickle except: import pickle from ._multiprocessing_helpers import mp if mp is not None: from .pool import MemmapingPool from multiprocessing.pool import ThreadPool from .format_stack import format_exc, format_outer_frames from .logger import Logger, short_format_time from .my_exceptions import TransportableException, _mk_exception from .disk import memstr_to_kbytes from ._compat import _basestring VALID_BACKENDS = ['multiprocessing', 'threading'] # Environment variables to protect against bad situations when nesting JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__" # In seconds, should be big enough to hide multiprocessing dispatching # overhead. # This settings was found by running benchmarks/bench_auto_batching.py # with various parameters on various platforms. MIN_IDEAL_BATCH_DURATION = .2 # Should not be too high to avoid stragglers: long jobs running alone # on a single worker while other workers have no work to process any more. MAX_IDEAL_BATCH_DURATION = 2 class BatchedCalls(object): """Wrap a sequence of (func, args, kwargs) tuples as a single callable""" def __init__(self, iterator_slice): self.items = list(iterator_slice) self._size = len(self.items) def __call__(self): return [func(*args, **kwargs) for func, args, kwargs in self.items] def __len__(self): return self._size ############################################################################### # CPU count that works also when multiprocessing has been disabled via # the JOBLIB_MULTIPROCESSING environment variable def cpu_count(): """ Return the number of CPUs. """ if mp is None: return 1 return mp.cpu_count() ############################################################################### # For verbosity def _verbosity_filter(index, verbose): """ Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index """ if not verbose: return True elif verbose > 10: return False if index == 0: return False verbose = .5 * (11 - verbose) ** 2 scale = sqrt(index / verbose) next_scale = sqrt((index + 1) / verbose) return (int(next_scale) == int(scale)) ############################################################################### class WorkerInterrupt(Exception): """ An exception that is not KeyboardInterrupt to allow subprocesses to be interrupted. """ pass ############################################################################### class SafeFunction(object): """ Wraps a function to make it exception with full traceback in their representation. Useful for parallel computing with multiprocessing, for which exceptions cannot be captured. 
""" def __init__(self, func): self.func = func def __call__(self, *args, **kwargs): try: return self.func(*args, **kwargs) except KeyboardInterrupt: # We capture the KeyboardInterrupt and reraise it as # something different, as multiprocessing does not # interrupt processing for a KeyboardInterrupt raise WorkerInterrupt() except: e_type, e_value, e_tb = sys.exc_info() text = format_exc(e_type, e_value, e_tb, context=10, tb_offset=1) if issubclass(e_type, TransportableException): raise else: raise TransportableException(text, e_type) ############################################################################### def delayed(function, check_pickle=True): """Decorator used to capture the arguments of a function. Pass `check_pickle=False` when: - performing a possibly repeated check is too costly and has been done already once outside of the call to delayed. - when used in conjunction `Parallel(backend='threading')`. """ # Try to pickle the input function, to catch the problems early when # using with multiprocessing: if check_pickle: pickle.dumps(function) def delayed_function(*args, **kwargs): return function, args, kwargs try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: " functools.wraps fails on some callable objects " return delayed_function ############################################################################### class ImmediateComputeBatch(object): """Sequential computation of a batch of tasks. This replicates the async computation API but actually does not delay the computations when joblib.Parallel runs in sequential mode. """ def __init__(self, batch): # Don't delay the application, to avoid keeping the input # arguments in memory self.results = batch() def get(self): return self.results ############################################################################### class BatchCompletionCallBack(object): """Callback used by joblib.Parallel's multiprocessing backend. This callable is executed by the parent process whenever a worker process has returned the results of a batch of tasks. It is used for progress reporting, to update estimate of the batch processing duration and to schedule the next batch of tasks to be processed. """ def __init__(self, dispatch_timestamp, batch_size, parallel): self.dispatch_timestamp = dispatch_timestamp self.batch_size = batch_size self.parallel = parallel def __call__(self, out): self.parallel.n_completed_tasks += self.batch_size this_batch_duration = time.time() - self.dispatch_timestamp if (self.parallel.batch_size == 'auto' and self.batch_size == self.parallel._effective_batch_size): # Update the smoothed streaming estimate of the duration of a batch # from dispatch to completion old_duration = self.parallel._smoothed_batch_duration if old_duration == 0: # First record of duration for this batch size after the last # reset. new_duration = this_batch_duration else: # Update the exponentially weighted average of the duration of # batch for the current effective size. new_duration = 0.8 * old_duration + 0.2 * this_batch_duration self.parallel._smoothed_batch_duration = new_duration self.parallel.print_progress() if self.parallel._original_iterator is not None: self.parallel.dispatch_next() ############################################################################### class Parallel(Logger): ''' Helper class for readable parallel mapping. 
Parameters ---------- n_jobs: int, default: 1 The maximum number of concurrently running jobs, such as the number of Python worker processes when backend="multiprocessing" or the size of the thread-pool when backend="threading". If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. backend: str or None, default: 'multiprocessing' Specify the parallelization backend implementation. Supported backends are: - "multiprocessing" used by default, can induce some communication and memory overhead when exchanging input and output data with the worker Python processes. - "threading" is a very low-overhead backend but it suffers from the Python Global Interpreter Lock if the called function relies a lot on Python objects. "threading" is mostly useful when the execution bottleneck is a compiled extension that explicitly releases the GIL (for instance a Cython loop wrapped in a "with nogil" block or an expensive call to a library such as NumPy). verbose: int, optional The verbosity level: if non-zero, progress messages are printed. Above 50, the output is sent to stdout. The frequency of the messages increases with the verbosity level. If it is more than 10, all iterations are reported. pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'} The number of batches (of tasks) to be pre-dispatched. Default is '2*n_jobs'. When batch_size="auto" this is a reasonable default and the multiprocessing workers should never starve. batch_size: int or 'auto', default: 'auto' The number of atomic tasks to dispatch at once to each worker. When individual evaluations are very fast, multiprocessing can be slower than sequential computation because of the overhead. Batching fast computations together can mitigate this. The ``'auto'`` strategy keeps track of the time it takes for a batch to complete, and dynamically adjusts the batch size to keep the time on the order of half a second, using a heuristic. The initial batch size is 1. ``batch_size="auto"`` with ``backend="threading"`` will dispatch batches of a single task at a time as the threading backend has very little overhead and using a larger batch size has not proved to bring any gain in that case. temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed to by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. Only active when backend="multiprocessing". max_nbytes: int, str, or None, optional, 1M by default Threshold on the size of arrays passed to the workers that triggers automated memory mapping in temp_folder. Can be an int in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte. Use None to disable memmaping of large arrays. Only active when backend="multiprocessing". Notes ----- This object uses the multiprocessing module to compute in parallel the application of a function to many different arguments.
The main functionality it brings in addition to using the raw multiprocessing API are (see examples for details): * More readable code, in particular since it avoids constructing list of arguments. * Easier debugging: - informative tracebacks even when the error happens on the client side - using 'n_jobs=1' enables to turn off parallel computing for debugging without changing the codepath - early capture of pickling errors * An optional progress meter. * Interruption of multiprocesses jobs with 'Ctrl-C' * Flexible pickling control for the communication to and from the worker processes. * Ability to use shared memory efficiently with worker processes for large numpy-based datastructures. Examples -------- A simple example: >>> from math import sqrt >>> from sklearn.externals.joblib import Parallel, delayed >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] Reshaping the output when the function has several return values: >>> from math import modf >>> from sklearn.externals.joblib import Parallel, delayed >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10)) >>> res, i = zip(*r) >>> res (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5) >>> i (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0) The progress meter: the higher the value of `verbose`, the more messages:: >>> from time import sleep >>> from sklearn.externals.joblib import Parallel, delayed >>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP [Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s [Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s [Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s [Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished Traceback example, note how the line of the error is indicated as well as the values of the parameter passed to the function that triggered the exception, even though the traceback happens in the child process:: >>> from heapq import nlargest >>> from sklearn.externals.joblib import Parallel, delayed >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP #... --------------------------------------------------------------------------- Sub-process traceback: --------------------------------------------------------------------------- TypeError Mon Nov 12 11:37:46 2012 PID: 12934 Python 2.7.3: /usr/bin/python ........................................................................... /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None) 419 if n >= size: 420 return sorted(iterable, key=key, reverse=True)[:n] 421 422 # When key is none, use simpler decoration 423 if key is None: --> 424 it = izip(iterable, count(0,-1)) # decorate 425 result = _nlargest(n, it) 426 return map(itemgetter(0), result) # undecorate 427 428 # General case, slowest method TypeError: izip argument #1 must support iteration ___________________________________________________________________________ Using pre_dispatch in a producer/consumer situation, where the data is generated on the fly. Note how the producer is first called a 3 times before the parallel loop is initiated, and then called to generate new data on the fly. 
In this case the total number of iterations cannot be reported in the progress messages:: >>> from math import sqrt >>> from sklearn.externals.joblib import Parallel, delayed >>> def producer(): ... for i in range(6): ... print('Produced %s' % i) ... yield i >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')( ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP Produced 0 Produced 1 Produced 2 [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s Produced 3 [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s Produced 4 [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s Produced 5 [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s [Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished ''' def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0, pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None, max_nbytes='1M', mmap_mode='r'): self.verbose = verbose self._mp_context = None if backend is None: # `backend=None` was supported in 0.8.2 with this effect backend = "multiprocessing" elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'): # Make it possible to pass a custom multiprocessing context as # backend to change the start method to forkserver or spawn or # preload modules on the forkserver helper process. self._mp_context = backend backend = "multiprocessing" if backend not in VALID_BACKENDS: raise ValueError("Invalid backend: %s, expected one of %r" % (backend, VALID_BACKENDS)) self.backend = backend self.n_jobs = n_jobs if (batch_size == 'auto' or isinstance(batch_size, Integral) and batch_size > 0): self.batch_size = batch_size else: raise ValueError( "batch_size must be 'auto' or a positive integer, got: %r" % batch_size) self.pre_dispatch = pre_dispatch self._pool = None self._temp_folder = temp_folder if isinstance(max_nbytes, _basestring): self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes) else: self._max_nbytes = max_nbytes self._mmap_mode = mmap_mode # Not starting the pool in the __init__ is a design decision, to be # able to close it ASAP, and not burden the user with closing it. self._output = None self._jobs = list() # A flag used to abort the dispatching of jobs in case an # exception is found self._aborting = False def _dispatch(self, batch): """Queue the batch for computing, with or without multiprocessing WARNING: this method is not thread-safe: it should be only called indirectly via dispatch_one_batch. """ if self._pool is None: job = ImmediateComputeBatch(batch) self._jobs.append(job) self.n_dispatched_batches += 1 self.n_dispatched_tasks += len(batch) self.n_completed_tasks += len(batch) if not _verbosity_filter(self.n_dispatched_batches, self.verbose): self._print('Done %3i tasks | elapsed: %s', (self.n_completed_tasks, short_format_time(time.time() - self._start_time) )) else: # If job.get() catches an exception, it closes the queue: if self._aborting: return dispatch_timestamp = time.time() cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self) job = self._pool.apply_async(SafeFunction(batch), callback=cb) self._jobs.append(job) self.n_dispatched_tasks += len(batch) self.n_dispatched_batches += 1 def dispatch_next(self): """Dispatch more data for parallel processing This method is meant to be called concurrently by the multiprocessing callback. We rely on the thread-safety of dispatch_one_batch to protect against concurrent consumption of the unprotected iterator. 
""" if not self.dispatch_one_batch(self._original_iterator): self._iterating = False self._original_iterator = None def dispatch_one_batch(self, iterator): """Prefetch the tasks for the next batch and dispatch them. The effective size of the batch is computed here. If there are no more jobs to dispatch, return False, else return True. The iterator consumption and dispatching is protected by the same lock so calling this function should be thread safe. """ if self.batch_size == 'auto' and self.backend == 'threading': # Batching is never beneficial with the threading backend batch_size = 1 elif self.batch_size == 'auto': old_batch_size = self._effective_batch_size batch_duration = self._smoothed_batch_duration if (batch_duration > 0 and batch_duration < MIN_IDEAL_BATCH_DURATION): # The current batch size is too small: the duration of the # processing of a batch of task is not large enough to hide # the scheduling overhead. ideal_batch_size = int( old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration) # Multiply by two to limit oscilations between min and max. batch_size = max(2 * ideal_batch_size, 1) self._effective_batch_size = batch_size if self.verbose >= 10: self._print("Batch computation too fast (%.4fs.) " "Setting batch_size=%d.", ( batch_duration, batch_size)) elif (batch_duration > MAX_IDEAL_BATCH_DURATION and old_batch_size >= 2): # The current batch size is too big. If we schedule overly long # running batches some CPUs might wait with nothing left to do # while a couple of CPUs a left processing a few long running # batches. Better reduce the batch size a bit to limit the # likelihood of scheduling such stragglers. self._effective_batch_size = batch_size = old_batch_size // 2 if self.verbose >= 10: self._print("Batch computation too slow (%.2fs.) " "Setting batch_size=%d.", ( batch_duration, batch_size)) else: # No batch size adjustment batch_size = old_batch_size if batch_size != old_batch_size: # Reset estimation of the smoothed mean batch duration: this # estimate is updated in the multiprocessing apply_async # CallBack as long as the batch_size is constant. Therefore # we need to reset the estimate whenever we re-tune the batch # size. self._smoothed_batch_duration = 0 else: # Fixed batch size strategy batch_size = self.batch_size with self._lock: tasks = BatchedCalls(itertools.islice(iterator, batch_size)) if not tasks: # No more tasks available in the iterator: tell caller to stop. return False else: self._dispatch(tasks) return True def _print(self, msg, msg_args): """Display the message on stout or stderr depending on verbosity""" # XXX: Not using the logger framework: need to # learn to use logger better. if not self.verbose: return if self.verbose < 50: writer = sys.stderr.write else: writer = sys.stdout.write msg = msg % msg_args writer('[%s]: %s\n' % (self, msg)) def print_progress(self): """Display the process of the parallel execution only a fraction of time, controlled by self.verbose. 
""" if not self.verbose: return elapsed_time = time.time() - self._start_time # This is heuristic code to print only 'verbose' times a messages # The challenge is that we may not know the queue length if self._original_iterator: if _verbosity_filter(self.n_dispatched_batches, self.verbose): return self._print('Done %3i tasks | elapsed: %s', (self.n_completed_tasks, short_format_time(elapsed_time), )) else: index = self.n_dispatched_batches # We are finished dispatching total_tasks = self.n_dispatched_tasks # We always display the first loop if not index == 0: # Display depending on the number of remaining items # A message as soon as we finish dispatching, cursor is 0 cursor = (total_tasks - index + 1 - self._pre_dispatch_amount) frequency = (total_tasks // self.verbose) + 1 is_last_item = (index + 1 == total_tasks) if (is_last_item or cursor % frequency): return remaining_time = (elapsed_time / (index + 1) * (self.n_dispatched_tasks - index - 1.)) self._print('Done %3i out of %3i | elapsed: %s remaining: %s', (index + 1, total_tasks, short_format_time(elapsed_time), short_format_time(remaining_time), )) def retrieve(self): self._output = list() while self._iterating or len(self._jobs) > 0: if len(self._jobs) == 0: # Wait for an async callback to dispatch new jobs time.sleep(0.01) continue # We need to be careful: the job queue can be filling up as # we empty it if hasattr(self, '_lock'): self._lock.acquire() job = self._jobs.pop(0) if hasattr(self, '_lock'): self._lock.release() try: self._output.extend(job.get()) except tuple(self.exceptions) as exception: try: self._aborting = True self._lock.acquire() if isinstance(exception, (KeyboardInterrupt, WorkerInterrupt)): # We have captured a user interruption, clean up # everything if hasattr(self, '_pool'): self._pool.close() self._pool.terminate() # We can now allow subprocesses again os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0) raise exception elif isinstance(exception, TransportableException): # Capture exception to add information on the local # stack in addition to the distant stack this_report = format_outer_frames(context=10, stack_start=1) report = """Multiprocessing exception: %s --------------------------------------------------------------------------- Sub-process traceback: --------------------------------------------------------------------------- %s""" % ( this_report, exception.message, ) # Convert this to a JoblibException exception_type = _mk_exception(exception.etype)[0] raise exception_type(report) raise exception finally: self._lock.release() def __call__(self, iterable): if self._jobs: raise ValueError('This Parallel instance is already running') n_jobs = self.n_jobs if n_jobs == 0: raise ValueError('n_jobs == 0 in Parallel has no meaning') if n_jobs < 0 and mp is not None: n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) # The list of exceptions that we will capture self.exceptions = [TransportableException] self._lock = threading.Lock() # Whether or not to set an environment flag to track # multiple process spawning set_environ_flag = False if (n_jobs is None or mp is None or n_jobs == 1): n_jobs = 1 self._pool = None elif self.backend == 'threading': self._pool = ThreadPool(n_jobs) elif self.backend == 'multiprocessing': if mp.current_process().daemon: # Daemonic processes cannot have children n_jobs = 1 self._pool = None warnings.warn( 'Multiprocessing-backed parallel loops cannot be nested,' ' setting n_jobs=1', stacklevel=2) elif threading.current_thread().name != 'MainThread': # Prevent posix fork inside in 
non-main posix threads n_jobs = 1 self._pool = None warnings.warn( 'Multiprocessing backed parallel loops cannot be nested' ' below threads, setting n_jobs=1', stacklevel=2) else: already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0)) if already_forked: raise ImportError('[joblib] Attempting to do parallel computing ' 'without protecting your import on a system that does ' 'not support forking. To use parallel-computing in a ' 'script, you must protect your main loop using "if ' "__name__ == '__main__'" '". Please see the joblib documentation on Parallel ' 'for more information' ) # Make sure to free as much memory as possible before forking gc.collect() # Set an environment variable to avoid infinite loops set_environ_flag = True poolargs = dict( max_nbytes=self._max_nbytes, mmap_mode=self._mmap_mode, temp_folder=self._temp_folder, verbose=max(0, self.verbose - 50), context_id=0, # the pool is used only for one call ) if self._mp_context is not None: # Use Python 3.4+ multiprocessing context isolation poolargs['context'] = self._mp_context self._pool = MemmapingPool(n_jobs, **poolargs) # We are using multiprocessing, we also want to capture # KeyboardInterrupts self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt]) else: raise ValueError("Unsupported backend: %s" % self.backend) if self.batch_size == 'auto': self._effective_batch_size = 1 iterator = iter(iterable) pre_dispatch = self.pre_dispatch if pre_dispatch == 'all' or n_jobs == 1: # prevent further dispatch via multiprocessing callback thread self._original_iterator = None self._pre_dispatch_amount = 0 else: self._original_iterator = iterator if hasattr(pre_dispatch, 'endswith'): pre_dispatch = eval(pre_dispatch) self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch) # The main thread will consume the first pre_dispatch items and # the remaining items will later be lazily dispatched by async # callbacks upon task completions. iterator = itertools.islice(iterator, pre_dispatch) self._start_time = time.time() self.n_dispatched_batches = 0 self.n_dispatched_tasks = 0 self.n_completed_tasks = 0 self._smoothed_batch_duration = 0.0 try: if set_environ_flag: # Set an environment variable to avoid infinite loops os.environ[JOBLIB_SPAWNED_PROCESS] = '1' self._iterating = True while self.dispatch_one_batch(iterator): pass if pre_dispatch == "all" or n_jobs == 1: # The iterable was consumed all at once by the above for loop. # No need to wait for async callbacks to trigger to # consumption. self._iterating = False self.retrieve() # Make sure that we get a last message telling us we are done elapsed_time = time.time() - self._start_time self._print('Done %3i out of %3i | elapsed: %s finished', (len(self._output), len(self._output), short_format_time(elapsed_time) )) finally: if n_jobs > 1: self._pool.close() self._pool.terminate() # terminate does a join() if self.backend == 'multiprocessing': os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0) self._jobs = list() output = self._output self._output = None return output def __repr__(self): return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
bsd-3-clause
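A short sketch of the Parallel / delayed usage documented in the file above, showing the default multiprocessing backend with automatic batching alongside the threading backend. The workload function and the job and task counts are made-up illustration values; the __main__ guard follows the advice embedded in the ImportError message above for platforms that spawn rather than fork worker processes.

# Sketch only: partial_sum stands in for a real, expensive, picklable task.
import numpy as np
from sklearn.externals.joblib import Parallel, delayed

def partial_sum(seed, n=100000):
    # stand-in workload; must be importable/picklable for the
    # multiprocessing backend
    rng = np.random.RandomState(seed)
    return rng.rand(n).sum()

if __name__ == '__main__':
    # default multiprocessing backend; batch_size='auto' groups fast tasks
    # to hide dispatch overhead
    totals = Parallel(n_jobs=2, batch_size='auto')(
        delayed(partial_sum)(seed) for seed in range(8))

    # threading backend: no pickling, useful when the work releases the GIL
    totals_threaded = Parallel(n_jobs=2, backend='threading')(
        delayed(partial_sum)(seed) for seed in range(8))

    print(sum(totals), sum(totals_threaded))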
pianomania/scikit-learn
sklearn/externals/joblib/__init__.py
23
5101
""" Joblib is a set of tools to provide **lightweight pipelining in Python**. In particular, joblib offers: 1. transparent disk-caching of the output values and lazy re-evaluation (memoize pattern) 2. easy simple parallel computing 3. logging and tracing of the execution Joblib is optimized to be **fast** and **robust** in particular on large data and has specific optimizations for `numpy` arrays. It is **BSD-licensed**. ============================== ============================================ **User documentation**: http://pythonhosted.org/joblib **Download packages**: http://pypi.python.org/pypi/joblib#downloads **Source code**: http://github.com/joblib/joblib **Report issues**: http://github.com/joblib/joblib/issues ============================== ============================================ Vision -------- The vision is to provide tools to easily achieve better performance and reproducibility when working with long running jobs. * **Avoid computing twice the same thing**: code is rerun over an over, for instance when prototyping computational-heavy jobs (as in scientific development), but hand-crafted solution to alleviate this issue is error-prone and often leads to unreproducible results * **Persist to disk transparently**: persisting in an efficient way arbitrary objects containing large data is hard. Using joblib's caching mechanism avoids hand-written persistence and implicitly links the file on disk to the execution context of the original Python object. As a result, joblib's persistence is good for resuming an application status or computational job, eg after a crash. Joblib strives to address these problems while **leaving your code and your flow control as unmodified as possible** (no framework, no new paradigms). Main features ------------------ 1) **Transparent and fast disk-caching of output value:** a memoize or make-like functionality for Python functions that works well for arbitrary Python objects, including very large numpy arrays. Separate persistence and flow-execution logic from domain logic or algorithmic code by writing the operations as a set of steps with well-defined inputs and outputs: Python functions. Joblib can save their computation to disk and rerun it only if necessary:: >>> from sklearn.externals.joblib import Memory >>> mem = Memory(cachedir='/tmp/joblib') >>> import numpy as np >>> a = np.vander(np.arange(3)).astype(np.float) >>> square = mem.cache(np.square) >>> b = square(a) # doctest: +ELLIPSIS ________________________________________________________________________________ [Memory] Calling square... square(array([[ 0., 0., 1.], [ 1., 1., 1.], [ 4., 2., 1.]])) ___________________________________________________________square - 0...s, 0.0min >>> c = square(a) >>> # The above call did not trigger an evaluation 2) **Embarrassingly parallel helper:** to make it easy to write readable parallel code and debug it quickly:: >>> from sklearn.externals.joblib import Parallel, delayed >>> from math import sqrt >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] 3) **Logging/tracing:** The different functionalities will progressively acquire better logging mechanism to help track what has been ran, and capture I/O easily. In addition, Joblib will provide a few I/O primitives, to easily define logging and display streams, and provide a way of compiling a report. We want to be able to quickly inspect what has been run. 
4) **Fast compressed Persistence**: a replacement for pickle to work efficiently on Python objects containing large data ( *joblib.dump* & *joblib.load* ). .. >>> import shutil ; shutil.rmtree('/tmp/joblib/') """ # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.10.3' from .memory import Memory, MemorizedResult from .logger import PrintTime from .logger import Logger from .hashing import hash from .numpy_pickle import dump from .numpy_pickle import load from .parallel import Parallel from .parallel import delayed from .parallel import cpu_count from .parallel import register_parallel_backend from .parallel import parallel_backend from .parallel import effective_n_jobs __all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump', 'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs', 'register_parallel_backend', 'parallel_backend']
bsd-3-clause
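The docstring above describes Memory-based disk caching; a minimal sketch of that pattern follows, assuming a throwaway temporary directory for the cache and a stand-in "expensive" function (both made up for illustration).

# Sketch of the transparent disk-caching pattern: the second call with the
# same argument is served from the on-disk cache instead of recomputed.
import tempfile
import numpy as np
from sklearn.externals.joblib import Memory

cachedir = tempfile.mkdtemp()              # throwaway cache location
mem = Memory(cachedir=cachedir, verbose=0)

@mem.cache
def expensive_transform(a):
    # pretend this is costly; the result is persisted to disk
    return np.vander(a)

a = np.arange(5)
b1 = expensive_transform(a)   # computed and written to the cache
b2 = expensive_transform(a)   # loaded from the cache, not recomputed
assert np.array_equal(b1, b2)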
PatrickOReilly/scikit-learn
sklearn/mixture/tests/test_gmm.py
4
20668
# These tests are those of the deprecated GMM class import unittest import copy import sys from nose.tools import assert_true import numpy as np from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_raises) from scipy import stats from sklearn import mixture from sklearn.datasets.samples_generator import make_spd_matrix from sklearn.utils.testing import (assert_greater, assert_raise_message, assert_warns_message, ignore_warnings) from sklearn.metrics.cluster import adjusted_rand_score from sklearn.externals.six.moves import cStringIO as StringIO rng = np.random.RandomState(0) def test_sample_gaussian(): # Test sample generation from mixture.sample_gaussian where covariance # is diagonal, spherical and full n_features, n_samples = 2, 300 axis = 1 mu = rng.randint(10) * rng.rand(n_features) cv = (rng.rand(n_features) + 1.0) ** 2 samples = mixture.sample_gaussian( mu, cv, covariance_type='diag', n_samples=n_samples) assert_true(np.allclose(samples.mean(axis), mu, atol=1.3)) assert_true(np.allclose(samples.var(axis), cv, atol=1.5)) # the same for spherical covariances cv = (rng.rand() + 1.0) ** 2 samples = mixture.sample_gaussian( mu, cv, covariance_type='spherical', n_samples=n_samples) assert_true(np.allclose(samples.mean(axis), mu, atol=1.5)) assert_true(np.allclose( samples.var(axis), np.repeat(cv, n_features), atol=1.5)) # and for full covariances A = rng.randn(n_features, n_features) cv = np.dot(A.T, A) + np.eye(n_features) samples = mixture.sample_gaussian( mu, cv, covariance_type='full', n_samples=n_samples) assert_true(np.allclose(samples.mean(axis), mu, atol=1.3)) assert_true(np.allclose(np.cov(samples), cv, atol=2.5)) # Numerical stability check: in SciPy 0.12.0 at least, eigh may return # tiny negative values in its second return value. 
from sklearn.mixture import sample_gaussian x = sample_gaussian([0, 0], [[4, 3], [1, .1]], covariance_type='full', random_state=42) assert_true(np.isfinite(x).all()) def _naive_lmvnpdf_diag(X, mu, cv): # slow and naive implementation of lmvnpdf ref = np.empty((len(X), len(mu))) stds = np.sqrt(cv) for i, (m, std) in enumerate(zip(mu, stds)): ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1) return ref def test_lmvnpdf_diag(): # test a slow and naive implementation of lmvnpdf and # compare it to the vectorized version (mixture.lmvnpdf) to test # for correctness n_features, n_components, n_samples = 2, 3, 10 mu = rng.randint(10) * rng.rand(n_components, n_features) cv = (rng.rand(n_components, n_features) + 1.0) ** 2 X = rng.randint(10) * rng.rand(n_samples, n_features) ref = _naive_lmvnpdf_diag(X, mu, cv) lpr = assert_warns_message(DeprecationWarning, "The function" " log_multivariate_normal_density is " "deprecated in 0.18 and will be removed in 0.20.", mixture.log_multivariate_normal_density, X, mu, cv, 'diag') assert_array_almost_equal(lpr, ref) def test_lmvnpdf_spherical(): n_features, n_components, n_samples = 2, 3, 10 mu = rng.randint(10) * rng.rand(n_components, n_features) spherecv = rng.rand(n_components, 1) ** 2 + 1 X = rng.randint(10) * rng.rand(n_samples, n_features) cv = np.tile(spherecv, (n_features, 1)) reference = _naive_lmvnpdf_diag(X, mu, cv) lpr = assert_warns_message(DeprecationWarning, "The function" " log_multivariate_normal_density is " "deprecated in 0.18 and will be removed in 0.20.", mixture.log_multivariate_normal_density, X, mu, spherecv, 'spherical') assert_array_almost_equal(lpr, reference) def test_lmvnpdf_full(): n_features, n_components, n_samples = 2, 3, 10 mu = rng.randint(10) * rng.rand(n_components, n_features) cv = (rng.rand(n_components, n_features) + 1.0) ** 2 X = rng.randint(10) * rng.rand(n_samples, n_features) fullcv = np.array([np.diag(x) for x in cv]) reference = _naive_lmvnpdf_diag(X, mu, cv) lpr = assert_warns_message(DeprecationWarning, "The function" " log_multivariate_normal_density is " "deprecated in 0.18 and will be removed in 0.20.", mixture.log_multivariate_normal_density, X, mu, fullcv, 'full') assert_array_almost_equal(lpr, reference) def test_lvmpdf_full_cv_non_positive_definite(): n_features, n_samples = 2, 10 rng = np.random.RandomState(0) X = rng.randint(10) * rng.rand(n_samples, n_features) mu = np.mean(X, 0) cv = np.array([[[-1, 0], [0, 1]]]) expected_message = "'covars' must be symmetric, positive-definite" assert_raise_message(ValueError, expected_message, mixture.log_multivariate_normal_density, X, mu, cv, 'full') # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_GMM_attributes(): n_components, n_features = 10, 4 covariance_type = 'diag' g = mixture.GMM(n_components, covariance_type, random_state=rng) weights = rng.rand(n_components) weights = weights / weights.sum() means = rng.randint(-20, 20, (n_components, n_features)) assert_true(g.n_components == n_components) assert_true(g.covariance_type == covariance_type) g.weights_ = weights assert_array_almost_equal(g.weights_, weights) g.means_ = means assert_array_almost_equal(g.means_, means) covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2 g.covars_ = covars assert_array_almost_equal(g.covars_, covars) assert_raises(ValueError, g._set_covars, []) assert_raises(ValueError, g._set_covars, np.zeros((n_components - 2, n_features))) assert_raises(ValueError, mixture.GMM, n_components=20, 
covariance_type='badcovariance_type') class GMMTester(): do_test_eval = True def _setUp(self): self.n_components = 10 self.n_features = 4 self.weights = rng.rand(self.n_components) self.weights = self.weights / self.weights.sum() self.means = rng.randint(-20, 20, (self.n_components, self.n_features)) self.threshold = -0.5 self.I = np.eye(self.n_features) self.covars = { 'spherical': (0.1 + 2 * rng.rand(self.n_components, self.n_features)) ** 2, 'tied': (make_spd_matrix(self.n_features, random_state=0) + 5 * self.I), 'diag': (0.1 + 2 * rng.rand(self.n_components, self.n_features)) ** 2, 'full': np.array([make_spd_matrix(self.n_features, random_state=0) + 5 * self.I for x in range(self.n_components)])} # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_eval(self): if not self.do_test_eval: return # DPGMM does not support setting the means and # covariances before fitting There is no way of fixing this # due to the variational parameters being more expressive than # covariance matrices g = self.model(n_components=self.n_components, covariance_type=self.covariance_type, random_state=rng) # Make sure the means are far apart so responsibilities.argmax() # picks the actual component used to generate the observations. g.means_ = 20 * self.means g.covars_ = self.covars[self.covariance_type] g.weights_ = self.weights gaussidx = np.repeat(np.arange(self.n_components), 5) n_samples = len(gaussidx) X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx] with ignore_warnings(category=DeprecationWarning): ll, responsibilities = g.score_samples(X) self.assertEqual(len(ll), n_samples) self.assertEqual(responsibilities.shape, (n_samples, self.n_components)) assert_array_almost_equal(responsibilities.sum(axis=1), np.ones(n_samples)) assert_array_equal(responsibilities.argmax(axis=1), gaussidx) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_sample(self, n=100): g = self.model(n_components=self.n_components, covariance_type=self.covariance_type, random_state=rng) # Make sure the means are far apart so responsibilities.argmax() # picks the actual component used to generate the observations. g.means_ = 20 * self.means g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1) g.weights_ = self.weights with ignore_warnings(category=DeprecationWarning): samples = g.sample(n) self.assertEqual(samples.shape, (n, self.n_features)) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_train(self, params='wmc'): g = mixture.GMM(n_components=self.n_components, covariance_type=self.covariance_type) with ignore_warnings(category=DeprecationWarning): g.weights_ = self.weights g.means_ = self.means g.covars_ = 20 * self.covars[self.covariance_type] # Create a training set by sampling from the predefined distribution. with ignore_warnings(category=DeprecationWarning): X = g.sample(n_samples=100) g = self.model(n_components=self.n_components, covariance_type=self.covariance_type, random_state=rng, min_covar=1e-1, n_iter=1, init_params=params) g.fit(X) # Do one training iteration at a time so we can keep track of # the log likelihood to make sure that it increases after each # iteration. 
trainll = [] with ignore_warnings(category=DeprecationWarning): for _ in range(5): g.params = params g.init_params = '' g.fit(X) trainll.append(self.score(g, X)) g.n_iter = 10 g.init_params = '' g.params = params g.fit(X) # finish fitting # Note that the log likelihood will sometimes decrease by a # very small amount after it has more or less converged due to # the addition of min_covar to the covariance (to prevent # underflow). This is why the threshold is set to -0.5 # instead of 0. with ignore_warnings(category=DeprecationWarning): delta_min = np.diff(trainll).min() self.assertTrue( delta_min > self.threshold, "The min nll increase is %f which is lower than the admissible" " threshold of %f, for model %s. The likelihoods are %s." % (delta_min, self.threshold, self.covariance_type, trainll)) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_train_degenerate(self, params='wmc'): # Train on degenerate data with 0 in some dimensions # Create a training set by sampling from the predefined # distribution. X = rng.randn(100, self.n_features) X.T[1:] = 0 g = self.model(n_components=2, covariance_type=self.covariance_type, random_state=rng, min_covar=1e-3, n_iter=5, init_params=params) with ignore_warnings(category=DeprecationWarning): g.fit(X) trainll = g.score(X) self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_train_1d(self, params='wmc'): # Train on 1-D data # Create a training set by sampling from the predefined # distribution. X = rng.randn(100, 1) # X.T[1:] = 0 g = self.model(n_components=2, covariance_type=self.covariance_type, random_state=rng, min_covar=1e-7, n_iter=5, init_params=params) with ignore_warnings(category=DeprecationWarning): g.fit(X) trainll = g.score(X) if isinstance(g, mixture.dpgmm._DPGMMBase): self.assertTrue(np.sum(np.abs(trainll / 100)) < 5) else: self.assertTrue(np.sum(np.abs(trainll / 100)) < 2) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def score(self, g, X): with ignore_warnings(category=DeprecationWarning): return g.score(X).sum() class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester): covariance_type = 'spherical' model = mixture.GMM setUp = GMMTester._setUp class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester): covariance_type = 'diag' model = mixture.GMM setUp = GMMTester._setUp class TestGMMWithTiedCovars(unittest.TestCase, GMMTester): covariance_type = 'tied' model = mixture.GMM setUp = GMMTester._setUp class TestGMMWithFullCovars(unittest.TestCase, GMMTester): covariance_type = 'full' model = mixture.GMM setUp = GMMTester._setUp # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_multiple_init(): # Test that multiple inits does not much worse than a single one X = rng.randn(30, 5) X[:10] += 2 g = mixture.GMM(n_components=2, covariance_type='spherical', random_state=rng, min_covar=1e-7, n_iter=5) with ignore_warnings(category=DeprecationWarning): train1 = g.fit(X).score(X).sum() g.n_init = 5 train2 = g.fit(X).score(X).sum() assert_true(train2 >= train1 - 1.e-2) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_n_parameters(): n_samples, n_dim, n_components = 7, 5, 2 X = rng.randn(n_samples, n_dim) n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41} for cv_type in ['full', 'tied', 'diag', 
'spherical']: with ignore_warnings(category=DeprecationWarning): g = mixture.GMM(n_components=n_components, covariance_type=cv_type, random_state=rng, min_covar=1e-7, n_iter=1) g.fit(X) assert_true(g._n_parameters() == n_params[cv_type]) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_1d_1component(): # Test all of the covariance_types return the same BIC score for # 1-dimensional, 1 component fits. n_samples, n_dim, n_components = 100, 1, 1 X = rng.randn(n_samples, n_dim) g_full = mixture.GMM(n_components=n_components, covariance_type='full', random_state=rng, min_covar=1e-7, n_iter=1) with ignore_warnings(category=DeprecationWarning): g_full.fit(X) g_full_bic = g_full.bic(X) for cv_type in ['tied', 'diag', 'spherical']: g = mixture.GMM(n_components=n_components, covariance_type=cv_type, random_state=rng, min_covar=1e-7, n_iter=1) g.fit(X) assert_array_almost_equal(g.bic(X), g_full_bic) def assert_fit_predict_correct(model, X): model2 = copy.deepcopy(model) predictions_1 = model.fit(X).predict(X) predictions_2 = model2.fit_predict(X) assert adjusted_rand_score(predictions_1, predictions_2) == 1.0 # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_fit_predict(): """ test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict """ lrng = np.random.RandomState(101) n_samples, n_dim, n_comps = 100, 2, 2 mu = np.array([[8, 8]]) component_0 = lrng.randn(n_samples, n_dim) component_1 = lrng.randn(n_samples, n_dim) + mu X = np.vstack((component_0, component_1)) for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM): model = m_constructor(n_components=n_comps, covariance_type='full', min_covar=1e-7, n_iter=5, random_state=np.random.RandomState(0)) assert_fit_predict_correct(model, X) model = mixture.GMM(n_components=n_comps, n_iter=0) z = model.fit_predict(X) assert np.all(z == 0), "Quick Initialization Failed!" # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_aic(): # Test the aic and bic criteria n_samples, n_dim, n_components = 50, 3, 2 X = rng.randn(n_samples, n_dim) SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy for cv_type in ['full', 'tied', 'diag', 'spherical']: g = mixture.GMM(n_components=n_components, covariance_type=cv_type, random_state=rng, min_covar=1e-7) g.fit(X) aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters() bic = (2 * n_samples * SGH * n_dim + np.log(n_samples) * g._n_parameters()) bound = n_dim * 3. / np.sqrt(n_samples) assert_true(np.abs(g.aic(X) - aic) / n_samples < bound) assert_true(np.abs(g.bic(X) - bic) / n_samples < bound) # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def check_positive_definite_covars(covariance_type): r"""Test that covariance matrices do not become non positive definite Due to the accumulation of round-off errors, the computation of the covariance matrices during the learning phase could lead to non-positive definite covariance matrices. Namely the use of the formula: .. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T instead of: .. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T while mathematically equivalent, was observed a ``LinAlgError`` exception, when computing a ``GMM`` with full covariance matrices and fixed mean. This function ensures that some later optimization will not introduce the problem again. """ rng = np.random.RandomState(1) # we build a dataset with 2 2d component. 
The components are unbalanced # (respective weights 0.9 and 0.1) X = rng.randn(100, 2) X[-10:] += (3, 3) # Shift the 10 last points gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type, min_covar=1e-3) # This is a non-regression test for issue #2640. The following call used # to trigger: # numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite gmm.fit(X) if covariance_type == "diag" or covariance_type == "spherical": assert_greater(gmm.covars_.min(), 0) else: if covariance_type == "tied": covs = [gmm.covars_] else: covs = gmm.covars_ for c in covs: assert_greater(np.linalg.det(c), 0) def test_positive_definite_covars(): # Check positive definiteness for all covariance types for covariance_type in ["full", "tied", "diag", "spherical"]: yield check_positive_definite_covars, covariance_type # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_verbose_first_level(): # Create sample data X = rng.randn(30, 5) X[:10] += 2 g = mixture.GMM(n_components=2, n_init=2, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: g.fit(X) finally: sys.stdout = old_stdout # This function tests the deprecated old GMM class @ignore_warnings(category=DeprecationWarning) def test_verbose_second_level(): # Create sample data X = rng.randn(30, 5) X[:10] += 2 g = mixture.GMM(n_components=2, n_init=2, verbose=2) old_stdout = sys.stdout sys.stdout = StringIO() try: g.fit(X) finally: sys.stdout = old_stdout
bsd-3-clause
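The tests above exercise the deprecated GMM class; a compact sketch of the fit/score pattern they rely on is given below: fit one mixture per covariance type on well-separated blobs and compare BIC. The data, component count, and iteration count are made up, and the DeprecationWarning this class emits in this scikit-learn version is silenced explicitly here.

# Sketch only: mirrors the fit + bic pattern used in test_1d_1component and
# test_aic above, on made-up blob data.
import warnings
import numpy as np
from sklearn import mixture
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=0)

with warnings.catch_warnings():
    warnings.simplefilter('ignore', DeprecationWarning)
    for cv_type in ['spherical', 'diag', 'tied', 'full']:
        g = mixture.GMM(n_components=3, covariance_type=cv_type,
                        random_state=0, n_iter=100)
        g.fit(X)
        print(cv_type, g.bic(X))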
PatrickOReilly/scikit-learn
sklearn/calibration.py
17
19402
"""Calibration of predicted probabilities.""" # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Balazs Kegl <balazs.kegl@gmail.com> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # Mathieu Blondel <mathieu@mblondel.org> # # License: BSD 3 clause from __future__ import division import warnings from math import log import numpy as np from scipy.optimize import fmin_bfgs from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone from .preprocessing import LabelBinarizer from .utils import check_X_y, check_array, indexable, column_or_1d from .utils.validation import check_is_fitted from .utils.fixes import signature from .isotonic import IsotonicRegression from .svm import LinearSVC from .model_selection import check_cv from .metrics.classification import _check_binary_probabilistic_predictions class CalibratedClassifierCV(BaseEstimator, ClassifierMixin): """Probability calibration with isotonic regression or sigmoid. With this class, the base_estimator is fit on the train set of the cross-validation generator and the test set is used for calibration. The probabilities for each of the folds are then averaged for prediction. In case that cv="prefit" is passed to __init__, it is assumed that base_estimator has been fitted already and all data is used for calibration. Note that data for fitting the classifier and for calibrating it must be disjoint. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. If cv=prefit, the classifier must have been fit already on data. method : 'sigmoid' or 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parametric approach. It is not advised to use isotonic calibration with too few calibration samples ``(<<1000)`` since it tends to overfit. Use sigmoids (Platt's calibration) in this case. cv : integer, cross-validation generator, iterable or "prefit", optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is neither binary nor multiclass, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. If "prefit" is passed, it is assumed that base_estimator has been fitted already and all data is used for calibration. Attributes ---------- classes_ : array, shape (n_classes) The class labels. calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit") The list of calibrated classifiers, one for each crossvalidation fold, which has been fitted on all but the validation fold and calibrated on the validation fold. References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. 
[4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator=None, method='sigmoid', cv=3): self.base_estimator = base_estimator self.method = method self.cv = cv def fit(self, X, y, sample_weight=None): """Fit the calibrated model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) X, y = indexable(X, y) lb = LabelBinarizer().fit(y) self.classes_ = lb.classes_ # Check that each cross-validation fold can have at least one # example per class n_folds = self.cv if isinstance(self.cv, int) \ else self.cv.n_folds if hasattr(self.cv, "n_folds") else None if n_folds and \ np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]): raise ValueError("Requesting %d-fold cross-validation but provided" " less than %d examples for at least one class." % (n_folds, n_folds)) self.calibrated_classifiers_ = [] if self.base_estimator is None: # we want all classifiers that don't expose a random_state # to be deterministic (and we don't want to expose this one). base_estimator = LinearSVC(random_state=0) else: base_estimator = self.base_estimator if self.cv == "prefit": calibrated_classifier = _CalibratedClassifier( base_estimator, method=self.method) if sample_weight is not None: calibrated_classifier.fit(X, y, sample_weight) else: calibrated_classifier.fit(X, y) self.calibrated_classifiers_.append(calibrated_classifier) else: cv = check_cv(self.cv, y, classifier=True) fit_parameters = signature(base_estimator.fit).parameters estimator_name = type(base_estimator).__name__ if (sample_weight is not None and "sample_weight" not in fit_parameters): warnings.warn("%s does not support sample_weight. Samples" " weights are only used for the calibration" " itself." % estimator_name) base_estimator_sample_weight = None else: base_estimator_sample_weight = sample_weight for train, test in cv.split(X, y): this_estimator = clone(base_estimator) if base_estimator_sample_weight is not None: this_estimator.fit( X[train], y[train], sample_weight=base_estimator_sample_weight[train]) else: this_estimator.fit(X[train], y[train]) calibrated_classifier = _CalibratedClassifier( this_estimator, method=self.method) if sample_weight is not None: calibrated_classifier.fit(X[test], y[test], sample_weight[test]) else: calibrated_classifier.fit(X[test], y[test]) self.calibrated_classifiers_.append(calibrated_classifier) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. 
""" check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) # Compute the arithmetic mean of the predictions of the calibrated # classifiers mean_proba = np.zeros((X.shape[0], len(self.classes_))) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba += proba mean_proba /= len(self.calibrated_classifiers_) return mean_proba def predict(self, X): """Predict the target of new samples. Can be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples,) The predicted class. """ check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) return self.classes_[np.argmax(self.predict_proba(X), axis=1)] class _CalibratedClassifier(object): """Probability calibration with isotonic regression or sigmoid. It assumes that base_estimator has already been fit, and trains the calibration on the input set of the fit function. Note that this class should not be used as an estimator directly. Use CalibratedClassifierCV with cv="prefit" instead. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. No default value since it has to be an already fitted estimator. method : 'sigmoid' | 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parametric approach based on isotonic regression. References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. [4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator, method='sigmoid'): self.base_estimator = base_estimator self.method = method def _preproc(self, X): n_classes = len(self.classes_) if hasattr(self.base_estimator, "decision_function"): df = self.base_estimator.decision_function(X) if df.ndim == 1: df = df[:, np.newaxis] elif hasattr(self.base_estimator, "predict_proba"): df = self.base_estimator.predict_proba(X) if n_classes == 2: df = df[:, 1:] else: raise RuntimeError('classifier has no decision_function or ' 'predict_proba method.') idx_pos_class = np.arange(df.shape[1]) return df, idx_pos_class def fit(self, X, y, sample_weight=None): """Calibrate the fitted model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ lb = LabelBinarizer() Y = lb.fit_transform(y) self.classes_ = lb.classes_ df, idx_pos_class = self._preproc(X) self.calibrators_ = [] for k, this_df in zip(idx_pos_class, df.T): if self.method == 'isotonic': calibrator = IsotonicRegression(out_of_bounds='clip') elif self.method == 'sigmoid': calibrator = _SigmoidCalibration() else: raise ValueError('method should be "sigmoid" or ' '"isotonic". Got %s.' 
% self.method) calibrator.fit(this_df, Y[:, k], sample_weight) self.calibrators_.append(calibrator) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. Can be exact zeros. """ n_classes = len(self.classes_) proba = np.zeros((X.shape[0], n_classes)) df, idx_pos_class = self._preproc(X) for k, this_df, calibrator in \ zip(idx_pos_class, df.T, self.calibrators_): if n_classes == 2: k += 1 proba[:, k] = calibrator.predict(this_df) # Normalize the probabilities if n_classes == 2: proba[:, 0] = 1. - proba[:, 1] else: proba /= np.sum(proba, axis=1)[:, np.newaxis] # XXX : for some reason all probas can be 0 proba[np.isnan(proba)] = 1. / n_classes # Deal with cases where the predicted probability minimally exceeds 1.0 proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 return proba def _sigmoid_calibration(df, y, sample_weight=None): """Probability Calibration with sigmoid method (Platt 2000) Parameters ---------- df : ndarray, shape (n_samples,) The decision function or predict proba for the samples. y : ndarray, shape (n_samples,) The targets. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- a : float The slope. b : float The intercept. References ---------- Platt, "Probabilistic Outputs for Support Vector Machines" """ df = column_or_1d(df) y = column_or_1d(y) F = df # F follows Platt's notations tiny = np.finfo(np.float).tiny # to avoid division by 0 warning # Bayesian priors (see Platt end of section 2.2) prior0 = float(np.sum(y <= 0)) prior1 = y.shape[0] - prior0 T = np.zeros(y.shape) T[y > 0] = (prior1 + 1.) / (prior1 + 2.) T[y <= 0] = 1. / (prior0 + 2.) T1 = 1. - T def objective(AB): # From Platt (beginning of Section 2.2) E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny)) if sample_weight is not None: return (sample_weight * l).sum() else: return l.sum() def grad(AB): # gradient of the objective function E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) TEP_minus_T1P = P * (T * E - T1) if sample_weight is not None: TEP_minus_T1P *= sample_weight dA = np.dot(TEP_minus_T1P, F) dB = np.sum(TEP_minus_T1P) return np.array([dA, dB]) AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))]) AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False) return AB_[0], AB_[1] class _SigmoidCalibration(BaseEstimator, RegressorMixin): """Sigmoid regression model. Attributes ---------- a_ : float The slope. b_ : float The intercept. """ def fit(self, X, y, sample_weight=None): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples,) Training data. y : array-like, shape (n_samples,) Training target. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X = column_or_1d(X) y = column_or_1d(y) X, y = indexable(X, y) self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight) return self def predict(self, T): """Predict new data by linear interpolation. Parameters ---------- T : array-like, shape (n_samples,) Data to predict from. Returns ------- T_ : array, shape (n_samples,) The predicted data. 
""" T = column_or_1d(T) return 1. / (1. + np.exp(self.a_ * T + self.b_)) def calibration_curve(y_true, y_prob, normalize=False, n_bins=5): """Compute true and predicted probabilities for a calibration curve. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. normalize : bool, optional, default=False Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not a proper probability. If True, the smallest value in y_prob is mapped onto 0 and the largest one onto 1. n_bins : int Number of bins. A bigger number requires more data. Returns ------- prob_true : array, shape (n_bins,) The true probability in each bin (fraction of positives). prob_pred : array, shape (n_bins,) The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) elif y_prob.min() < 0 or y_prob.max() > 1: raise ValueError("y_prob has values outside [0, 1] and normalize is " "set to False.") y_true = _check_binary_probabilistic_predictions(y_true, y_prob) bins = np.linspace(0., 1. + 1e-8, n_bins + 1) binids = np.digitize(y_prob, bins) - 1 bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = (bin_true[nonzero] / bin_total[nonzero]) prob_pred = (bin_sums[nonzero] / bin_total[nonzero]) return prob_true, prob_pred
bsd-3-clause
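The calibration utilities above are normally driven through their public entry points rather than the private helpers. A minimal sketch follows (not taken from the file above; the toy dataset and the GaussianNB base estimator are illustrative choices): fit a CalibratedClassifierCV on training data and inspect the reliability curve returned by calibration_curve.

import numpy as np
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Sigmoid (Platt) calibration wrapped around a naive Bayes base estimator.
calibrated = CalibratedClassifierCV(GaussianNB(), method='sigmoid', cv=3)
calibrated.fit(X_train, y_train)
prob_pos = calibrated.predict_proba(X_test)[:, 1]

# Fraction of positives vs. mean predicted probability per bin.
prob_true, prob_pred = calibration_curve(y_test, prob_pos, n_bins=10)
print(np.c_[prob_pred, prob_true])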
pianomania/scikit-learn
sklearn/utils/tests/test_class_weight.py
8
9531
import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.datasets import make_blobs from sklearn.utils.class_weight import compute_class_weight from sklearn.utils.class_weight import compute_sample_weight from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal def test_compute_class_weight(): # Test (and demo) compute_class_weight. y = np.asarray([2, 2, 2, 3, 3, 4]) classes = np.unique(y) cw = compute_class_weight("balanced", classes, y) # total effect of samples is preserved class_counts = np.bincount(y)[2:] assert_almost_equal(np.dot(cw, class_counts), y.shape[0]) assert_true(cw[0] < cw[1] < cw[2]) def test_compute_class_weight_not_present(): # Raise error when y does not contain all class labels classes = np.arange(4) y = np.asarray([0, 0, 0, 1, 1, 2]) assert_raises(ValueError, compute_class_weight, "balanced", classes, y) # Raise error when y has items not in classes classes = np.arange(2) assert_raises(ValueError, compute_class_weight, "balanced", classes, y) assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y) def test_compute_class_weight_dict(): classes = np.arange(3) class_weights = {0: 1.0, 1: 2.0, 2: 3.0} y = np.asarray([0, 0, 1, 2]) cw = compute_class_weight(class_weights, classes, y) # When the user specifies class weights, compute_class_weights should just # return them. assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw) # When a class weight is specified that isn't in classes, a ValueError # should get raised msg = 'Class label 4 not present.' class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5} assert_raise_message(ValueError, msg, compute_class_weight, class_weights, classes, y) msg = 'Class label -1 not present.' class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0} assert_raise_message(ValueError, msg, compute_class_weight, class_weights, classes, y) def test_compute_class_weight_invariance(): # Test that results with class_weight="balanced" is invariant wrt # class imbalance if the number of samples is identical. # The test uses a balanced two class dataset with 100 datapoints. # It creates three versions, one where class 1 is duplicated # resulting in 150 points of class 1 and 50 of class 0, # one where there are 50 points in class 1 and 150 in class 0, # and one where there are 100 points of each class (this one is balanced # again). # With balancing class weights, all three should give the same model. X, y = make_blobs(centers=2, random_state=0) # create dataset where class 1 is duplicated twice X_1 = np.vstack([X] + [X[y == 1]] * 2) y_1 = np.hstack([y] + [y[y == 1]] * 2) # create dataset where class 0 is duplicated twice X_0 = np.vstack([X] + [X[y == 0]] * 2) y_0 = np.hstack([y] + [y[y == 0]] * 2) # duplicate everything X_ = np.vstack([X] * 2) y_ = np.hstack([y] * 2) # results should be identical logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1) logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0) logreg = LogisticRegression(class_weight="balanced").fit(X_, y_) assert_array_almost_equal(logreg1.coef_, logreg0.coef_) assert_array_almost_equal(logreg.coef_, logreg0.coef_) def test_compute_class_weight_balanced_negative(): # Test compute_class_weight when labels are negative # Test with balanced class labels. 
classes = np.array([-2, -1, 0]) y = np.asarray([-1, -1, 0, 0, -2, -2]) cw = compute_class_weight("balanced", classes, y) assert_equal(len(cw), len(classes)) assert_array_almost_equal(cw, np.array([1., 1., 1.])) # Test with unbalanced class labels. y = np.asarray([-1, 0, 0, -2, -2, -2]) cw = compute_class_weight("balanced", classes, y) assert_equal(len(cw), len(classes)) class_counts = np.bincount(y + 2) assert_almost_equal(np.dot(cw, class_counts), y.shape[0]) assert_array_almost_equal(cw, [2. / 3, 2., 1.]) def test_compute_class_weight_balanced_unordered(): # Test compute_class_weight when classes are unordered classes = np.array([1, 0, 3]) y = np.asarray([1, 0, 0, 3, 3, 3]) cw = compute_class_weight("balanced", classes, y) class_counts = np.bincount(y)[classes] assert_almost_equal(np.dot(cw, class_counts), y.shape[0]) assert_array_almost_equal(cw, [2., 1., 2. / 3]) def test_compute_sample_weight(): # Test (and demo) compute_sample_weight. # Test with balanced classes y = np.asarray([1, 1, 1, 2, 2, 2]) sample_weight = compute_sample_weight("balanced", y) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.]) # Test with user-defined weights sample_weight = compute_sample_weight({1: 2, 2: 1}, y) assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.]) # Test with column vector of balanced classes y = np.asarray([[1], [1], [1], [2], [2], [2]]) sample_weight = compute_sample_weight("balanced", y) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.]) # Test with unbalanced classes y = np.asarray([1, 1, 1, 2, 2, 2, 3]) sample_weight = compute_sample_weight("balanced", y) expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333]) assert_array_almost_equal(sample_weight, expected_balanced, decimal=4) # Test with `None` weights sample_weight = compute_sample_weight(None, y) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.]) # Test with multi-output of balanced classes y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]]) sample_weight = compute_sample_weight("balanced", y) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.]) # Test with multi-output with user-defined weights y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]]) sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y) assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.]) # Test with multi-output of unbalanced classes y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]]) sample_weight = compute_sample_weight("balanced", y) assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3) def test_compute_sample_weight_with_subsample(): # Test compute_sample_weight with subsamples specified. # Test with balanced classes and all samples present y = np.asarray([1, 1, 1, 2, 2, 2]) sample_weight = compute_sample_weight("balanced", y, range(6)) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.]) # Test with column vector of balanced classes and all samples present y = np.asarray([[1], [1], [1], [2], [2], [2]]) sample_weight = compute_sample_weight("balanced", y, range(6)) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.]) # Test with a subsample y = np.asarray([1, 1, 1, 2, 2, 2]) sample_weight = compute_sample_weight("balanced", y, range(4)) assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3, 2. 
/ 3, 2., 2., 2.]) # Test with a bootstrap subsample y = np.asarray([1, 1, 1, 2, 2, 2]) sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3]) expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.]) assert_array_almost_equal(sample_weight, expected_balanced) # Test with a bootstrap subsample for multi-output y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]]) sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3]) assert_array_almost_equal(sample_weight, expected_balanced ** 2) # Test with a missing class y = np.asarray([1, 1, 1, 2, 2, 2, 3]) sample_weight = compute_sample_weight("balanced", y, range(6)) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.]) # Test with a missing class for multi-output y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]]) sample_weight = compute_sample_weight("balanced", y, range(6)) assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.]) def test_compute_sample_weight_errors(): # Test compute_sample_weight raises errors expected. # Invalid preset string y = np.asarray([1, 1, 1, 2, 2, 2]) y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]]) assert_raises(ValueError, compute_sample_weight, "ni", y) assert_raises(ValueError, compute_sample_weight, "ni", y, range(4)) assert_raises(ValueError, compute_sample_weight, "ni", y_) assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4)) # Not "balanced" for subsample assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y, range(4)) # Not a list or preset for multi-output assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_) # Incorrect length list for multi-output assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
bsd-3-clause
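The behaviour exercised by these tests can be reproduced directly with the public helpers. A small sketch (the toy label vector is illustrative) showing that "balanced" class weights are inversely proportional to class frequencies, so the weighted class counts add back up to n_samples:

import numpy as np
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight

y = np.asarray([0, 0, 0, 0, 1, 1, 2])
classes = np.unique(y)

cw = compute_class_weight("balanced", classes=classes, y=y)
print(dict(zip(classes, cw)))        # rarer classes get larger weights
print(np.dot(cw, np.bincount(y)))    # total weight equals n_samples (7.0 here)

sw = compute_sample_weight("balanced", y)
print(sw)                            # per-sample weights, one entry per row of y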
PatrickOReilly/scikit-learn
examples/model_selection/grid_search_digits.py
7
2760
""" ============================================================ Parameter estimation using grid search with cross-validation ============================================================ This examples shows how a classifier is optimized by cross-validation, which is done using the :class:`sklearn.model_selection.GridSearchCV` object on a development set that comprises only half of the available labeled data. The performance of the selected hyper-parameters and trained model is then measured on a dedicated evaluation set that was not used during the model selection step. More details on tools available for model selection can be found in the sections on :ref:`cross_validation` and :ref:`grid_search`. """ from __future__ import print_function from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.metrics import classification_report from sklearn.svm import SVC print(__doc__) # Loading the Digits dataset digits = datasets.load_digits() # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) X = digits.images.reshape((n_samples, -1)) y = digits.target # Split the dataset in two equal parts X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=0) # Set the parameters by cross-validation tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] scores = ['precision', 'recall'] for score in scores: print("# Tuning hyper-parameters for %s" % score) print() clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_macro' % score) clf.fit(X_train, y_train) print("Best parameters set found on development set:") print() print(clf.best_params_) print() print("Grid scores on development set:") print() means = clf.results_['test_mean_score'] stds = clf.results_['test_std_score'] for i in range(len(clf.results_['params'])): print("%0.3f (+/-%0.03f) for %r" % (means[i], stds[i] * 2, clf.results_['params'][i])) print() print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() y_true, y_pred = y_test, clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # Note the problem is too easy: the hyperparameter plateau is too flat and the # output model is the same for precision and recall with ties in quality.
bsd-3-clause
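The script above reads clf.results_['test_mean_score'], a spelling that appears to come from a development snapshot of scikit-learn; in released versions the fitted GridSearchCV exposes the same per-candidate statistics under cv_results_, with keys such as 'mean_test_score', 'std_test_score' and 'params'. A minimal sketch under that assumption:

from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target

param_grid = {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100]}
search = GridSearchCV(SVC(), param_grid, cv=5, scoring='precision_macro')
search.fit(X, y)

# Report mean +/- 2 std of the cross-validated score for each candidate.
for mean, std, params in zip(search.cv_results_['mean_test_score'],
                             search.cv_results_['std_test_score'],
                             search.cv_results_['params']):
    print("%0.3f (+/-%0.3f) for %r" % (mean, 2 * std, params))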
ray-project/ray
rllib/utils/typing.py
1
8294
from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union, ) import numpy as np import gym from ray.rllib.utils.annotations import ExperimentalAPI if TYPE_CHECKING: from ray.rllib.env.env_context import EnvContext from ray.rllib.policy.dynamic_tf_policy_v2 import DynamicTFPolicyV2 from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 from ray.rllib.policy.policy import PolicySpec from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch from ray.rllib.policy.view_requirement import ViewRequirement from ray.rllib.utils import try_import_tf, try_import_torch _, tf, _ = try_import_tf() torch, _ = try_import_torch() # Represents a generic tensor type. # This could be an np.ndarray, tf.Tensor, or a torch.Tensor. TensorType = Union[np.array, "tf.Tensor", "torch.Tensor"] # Either a plain tensor, or a dict or tuple of tensors (or StructTensors). TensorStructType = Union[TensorType, dict, tuple] # A shape of a tensor. TensorShape = Union[Tuple[int], List[int]] # Represents a fully filled out config of a Algorithm class. # Note: Policy config dicts are usually the same as AlgorithmConfigDict, but # parts of it may sometimes be altered in e.g. a multi-agent setup, # where we have >1 Policies in the same Algorithm. AlgorithmConfigDict = TrainerConfigDict = dict # An algorithm config dict that only has overrides. It needs to be combined with # the default algorithm config to be used. PartialAlgorithmConfigDict = PartialTrainerConfigDict = dict # Represents the model config sub-dict of the algo config that is passed to # the model catalog. ModelConfigDict = dict # Objects that can be created through the `from_config()` util method # need a config dict with a "type" key, a class path (str), or a type directly. FromConfigSpec = Union[Dict[str, Any], type, str] # Represents the env_config sub-dict of the algo config that is passed to # the env constructor. EnvConfigDict = dict # Represents an environment id. These could be: # - An int index for a sub-env within a vectorized env. # - An external env ID (str), which changes(!) each episode. EnvID = Union[int, str] # Represents a BaseEnv, MultiAgentEnv, ExternalEnv, ExternalMultiAgentEnv, # VectorEnv, gym.Env, or ActorHandle. EnvType = Any # A callable, taking a EnvContext object # (config dict + properties: `worker_index`, `vector_index`, `num_workers`, # and `remote`) and returning an env object (or None if no env is used). EnvCreator = Callable[["EnvContext"], Optional[EnvType]] # Represents a generic identifier for an agent (e.g., "agent1"). AgentID = Any # Represents a generic identifier for a policy (e.g., "pol1"). PolicyID = str # Type of the config["multiagent"]["policies"] dict for multi-agent training. MultiAgentPolicyConfigDict = Dict[PolicyID, "PolicySpec"] # State dict of a Policy, mapping strings (e.g. "weights") to some state # data (TensorStructType). PolicyState = Dict[str, TensorStructType] # Any tf Policy type (static-graph or eager Policy). TFPolicyV2Type = Type[Union["DynamicTFPolicyV2", "EagerTFPolicyV2"]] # Represents an episode id. EpisodeID = int # Represents an "unroll" (maybe across different sub-envs in a vector env). UnrollID = int # A dict keyed by agent ids, e.g. {"agent-1": value}. MultiAgentDict = Dict[AgentID, Any] # A dict keyed by env ids that contain further nested dictionaries keyed by # agent ids. e.g., {"env-1": {"agent-1": value}}. MultiEnvDict = Dict[EnvID, MultiAgentDict] # Represents an observation returned from the env. 
EnvObsType = Any # Represents an action passed to the env. EnvActionType = Any # Info dictionary returned by calling step() on gym envs. Commonly empty dict. EnvInfoDict = dict # Represents a File object FileType = Any # Represents a ViewRequirements dict mapping column names (str) to # ViewRequirement objects. ViewRequirementsDict = Dict[str, "ViewRequirement"] # Represents the result dict returned by Algorithm.train(). ResultDict = dict # A tf or torch local optimizer object. LocalOptimizer = Union["tf.keras.optimizers.Optimizer", "torch.optim.Optimizer"] # Dict of tensors returned by compute gradients on the policy, e.g., # {"td_error": [...], "learner_stats": {"vf_loss": ..., ...}}, for multi-agent, # {"policy1": {"learner_stats": ..., }, "policy2": ...}. GradInfoDict = dict # Dict of learner stats returned by compute gradients on the policy, e.g., # {"vf_loss": ..., ...}. This will always be nested under the "learner_stats" # key(s) of a GradInfoDict. In the multi-agent case, this will be keyed by # policy id. LearnerStatsDict = dict # List of grads+var tuples (tf) or list of gradient tensors (torch) # representing model gradients and returned by compute_gradients(). ModelGradients = Union[List[Tuple[TensorType, TensorType]], List[TensorType]] # Type of dict returned by get_weights() representing model weights. ModelWeights = dict # An input dict used for direct ModelV2 calls. ModelInputDict = Dict[str, TensorType] # Some kind of sample batch. SampleBatchType = Union["SampleBatch", "MultiAgentBatch"] # A (possibly nested) space struct: Either a gym.spaces.Space or a # (possibly nested) dict|tuple of gym.space.Spaces. SpaceStruct = Union[gym.spaces.Space, dict, tuple] # A list of batches of RNN states. # Each item in this list has dimension [B, S] (S=state vector size) StateBatches = List[List[Any]] # Format of data output from policy forward pass. # __sphinx_doc_begin_policy_output_type__ PolicyOutputType = Tuple[TensorStructType, StateBatches, Dict] # __sphinx_doc_end_policy_output_type__ # __sphinx_doc_begin_agent_connector_data_type__ @ExperimentalAPI class AgentConnectorDataType: """Data type that is fed into and yielded from agent connectors. Args: env_id: ID of the environment. agent_id: ID to help identify the agent from which the data is received. data: A payload (``data``). With RLlib's default sampler, the payload is a dictionary of arbitrary data columns (obs, rewards, dones, etc). """ def __init__(self, env_id: str, agent_id: str, data: Any): self.env_id = env_id self.agent_id = agent_id self.data = data # __sphinx_doc_end_agent_connector_data_type__ # __sphinx_doc_begin_action_connector_output__ @ExperimentalAPI class ActionConnectorDataType: """Data type that is fed into and yielded from agent connectors. Args: env_id: ID of the environment. agent_id: ID to help identify the agent from which the data is received. input_dict: Input data that was passed into the policy. Sometimes output must be adapted based on the input, for example action masking. So the entire input data structure is provided here. output: An object of PolicyOutputType. It is is composed of the action output, the internal state output, and additional data fetches. 
""" def __init__( self, env_id: str, agent_id: str, input_dict: TensorStructType, output: PolicyOutputType, ): self.env_id = env_id self.agent_id = agent_id self.input_dict = input_dict self.output = output # __sphinx_doc_end_action_connector_output__ # __sphinx_doc_begin_agent_connector_output__ @ExperimentalAPI class AgentConnectorsOutput: """Final output data type of agent connectors. Args are populated depending on the AgentConnector settings. The branching happens in ViewRequirementAgentConnector. Args: raw_dict: The raw input dictionary that sampler can use to build episodes and training batches. This raw dict also gets passed into ActionConnectors in case it contains data useful for action adaptation (e.g. action masks). sample_batch: The SampleBatch that can be immediately used for querying the policy for next action. """ def __init__( self, raw_dict: Dict[str, TensorStructType], sample_batch: "SampleBatch" ): self.raw_dict = raw_dict self.sample_batch = sample_batch # __sphinx_doc_end_agent_connector_output__ # Generic type var. T = TypeVar("T")
apache-2.0
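These aliases and the connector data classes are ordinary typing helpers: downstream code imports them for annotations and for packaging per-agent data before it flows through the connectors. A minimal sketch (wrap_obs is a hypothetical helper, not part of RLlib):

from ray.rllib.utils.typing import AgentConnectorDataType, TensorStructType

def wrap_obs(env_id: str, agent_id: str, obs: TensorStructType) -> AgentConnectorDataType:
    # Hypothetical helper: package one agent's raw observation the way a
    # sampler would before feeding it through the agent connectors.
    return AgentConnectorDataType(env_id=env_id, agent_id=agent_id, data={"obs": obs})

packet = wrap_obs("env-0", "agent-1", [0.1, 0.2])
print(packet.env_id, packet.agent_id, packet.data)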
yyjiang/scikit-learn
sklearn/datasets/samples_generator.py
44
56433
""" Generate samples of synthetic data sets. """ # Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, # G. Louppe, J. Nothman # License: BSD 3 clause import numbers import warnings import array import numpy as np from scipy import linalg import scipy.sparse as sp from ..preprocessing import MultiLabelBinarizer from ..utils import check_array, check_random_state from ..utils import shuffle as util_shuffle from ..utils.fixes import astype from ..utils.random import sample_without_replacement from ..externals import six map = six.moves.map zip = six.moves.zip def _generate_hypercube(samples, dimensions, rng): """Returns distinct binary samples of length dimensions """ if dimensions > 30: return np.hstack([_generate_hypercube(samples, dimensions - 30, rng), _generate_hypercube(samples, 30, rng)]) out = astype(sample_without_replacement(2 ** dimensions, samples, random_state=rng), dtype='>u4', copy=False) out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:] return out def make_classification(n_samples=100, n_features=20, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None): """Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Prior to shuffling, `X` stacks a number of these primary "informative" features, "redundant" linear combinations of these, "repeated" duplicates of sampled features, and arbitrary noise for and remaining features. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. These comprise `n_informative` informative features, `n_redundant` redundant features, `n_repeated` duplicated features and `n_features-n_informative-n_redundant- n_repeated` useless features drawn at random. n_informative : int, optional (default=2) The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension `n_informative`. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, optional (default=2) The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, optional (default=0) The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, optional (default=2) The number of classes (or labels) of the classification problem. n_clusters_per_class : int, optional (default=2) The number of clusters per class. weights : list of floats or None (default=None) The proportions of samples assigned to each class. If None, then classes are balanced. Note that if `len(weights) == n_classes - 1`, then the last class weight is automatically inferred. More than `n_samples` samples may be returned if the sum of `weights` exceeds 1. 
flip_y : float, optional (default=0.01) The fraction of samples whose class are randomly exchanged. class_sep : float, optional (default=1.0) The factor multiplying the hypercube dimension. hypercube : boolean, optional (default=True) If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, array of shape [n_features] or None, optional (default=0.0) Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, array of shape [n_features] or None, optional (default=1.0) Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : boolean, optional (default=True) Shuffle the samples and the features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for class membership of each sample. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. See also -------- make_blobs: simplified variant make_multilabel_classification: unrelated generator for multilabel tasks """ generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError("Number of informative, redundant and repeated " "features must sum to less than the number of total" " features") if 2 ** n_informative < n_classes * n_clusters_per_class: raise ValueError("n_classes * n_clusters_per_class must" " be smaller or equal 2 ** n_informative") if weights and len(weights) not in [n_classes, n_classes - 1]: raise ValueError("Weights specified but incompatible with number " "of classes.") n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class if weights and len(weights) == (n_classes - 1): weights.append(1.0 - sum(weights)) if weights is None: weights = [1.0 / n_classes] * n_classes weights[-1] = 1.0 - sum(weights[:-1]) # Distribute samples among clusters by weight n_samples_per_cluster = [] for k in range(n_clusters): n_samples_per_cluster.append(int(n_samples * weights[k % n_classes] / n_clusters_per_class)) for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Intialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=np.int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(float) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.rand(n_clusters, 1) centroids *= generator.rand(1, n_informative) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.randn(n_samples, n_informative) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % 
n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.rand(n_informative, n_informative) - 1 X_k[...] = np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.rand(n_informative, n_redundant) - 1 X[:, n_informative:n_informative + n_redundant] = \ np.dot(X[:, :n_informative], B) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp) X[:, n:n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.randn(n_samples, n_useless) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.rand(n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.rand(n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.rand(n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator=False, return_distributions=False, random_state=None): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. n_classes : int, optional (default=5) The number of classes of the classification problem. n_labels : int, optional (default=2) The average number of labels per instance. More precisely, the number of labels per sample is drawn from a Poisson distribution with ``n_labels`` as its expected value, but samples are bounded (using rejection sampling) by ``n_classes``, and must be nonzero if ``allow_unlabeled`` is False. length : int, optional (default=50) The sum of the features (number of words if documents) is drawn from a Poisson distribution with this expected value. allow_unlabeled : bool, optional (default=True) If ``True``, some instances might not belong to any class. sparse : bool, optional (default=False) If ``True``, return a sparse feature matrix return_indicator : bool, optional (default=False), If ``True``, return ``Y`` in the binary indicator format, else return a tuple of lists of labels. return_distributions : bool, optional (default=False) If ``True``, return the prior class probability and conditional probabilities of features given classes, from which the data was drawn. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array or sparse CSR matrix of shape [n_samples, n_features] The generated samples. Y : tuple of lists or array of shape [n_samples, n_classes] The label sets. p_c : array, shape [n_classes] The probability of each class being drawn. Only returned if ``return_distributions=True``. p_w_c : array, shape [n_features, n_classes] The probability of each feature being drawn given each class. Only returned if ``return_distributions=True``. """ generator = check_random_state(random_state) p_c = generator.rand(n_classes) p_c /= p_c.sum() cumulative_p_c = np.cumsum(p_c) p_w_c = generator.rand(n_features, n_classes) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling y_size = n_classes + 1 while (not allow_unlabeled and y_size == 0) or y_size > n_classes: y_size = generator.poisson(n_labels) # pick n classes y = set() while len(y) != y_size: # pick a class with probability P(c) c = np.searchsorted(cumulative_p_c, generator.rand(y_size - len(y))) y.update(c) y = list(y) # pick a non-zero document length by rejection sampling n_words = 0 while n_words == 0: n_words = generator.poisson(length) # generate a document of length n_words if len(y) == 0: # if sample does not belong to any class, generate noise word words = generator.randint(n_features, size=n_words) return words, y # sample words with replacement from selected classes cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() cumulative_p_w_sample /= cumulative_p_w_sample[-1] words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words)) return words, y X_indices = array.array('i') X_indptr = array.array('i', [0]) Y = [] for i in range(n_samples): words, y = sample_example() X_indices.extend(words) X_indptr.append(len(X_indices)) Y.append(y) X_data = np.ones(len(X_indices), dtype=np.float64) X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) X.sum_duplicates() if not sparse: X = X.toarray() if return_indicator: lb = MultiLabelBinarizer() Y = lb.fit([range(n_classes)]).transform(Y) else: warnings.warn('Support for the sequence of sequences multilabel ' 'representation is being deprecated and replaced with ' 'a sparse indicator matrix. ' 'return_indicator will default to True from version ' '0.17.', DeprecationWarning) if return_distributions: return X, Y, p_c, p_w_c return X, Y def make_hastie_10_2(n_samples=12000, random_state=None): """Generates data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=12000) The number of samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 10] The input samples. 
y : array of shape [n_samples] The output values. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. See also -------- make_gaussian_quantiles: a generalization of this dataset approach """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64) y[y == 0.0] = -1.0 return X, y def make_regression(n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. 
""" n_informative = min(n_features, n_informative) generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.randn(n_samples, n_features) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.rand(n_informative, n_targets) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None, factor=.8): """Make a large circle containing a smaller circle in 2d. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle: bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. factor : double < 1 (default=.8) Scale factor between inner and outer circle. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. """ if factor > 1 or factor < 0: raise ValueError("'factor' has to be between 0 and 1.") generator = check_random_state(random_state) # so as not to have the first point = last point, we add one and then # remove it. linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1] outer_circ_x = np.cos(linspace) outer_circ_y = np.sin(linspace) inner_circ_x = outer_circ_x * factor inner_circ_y = outer_circ_y * factor X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp), np.ones(n_samples // 2, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles A simple toy dataset to visualize clustering and classification algorithms. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle : bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. Read more in the :ref:`User Guide <sample_generators>`. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out generator = check_random_state(random_state) outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5 X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples_in, dtype=np.intp), np.ones(n_samples_out, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None): """Generate isotropic Gaussian blobs for clustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points equally divided among clusters. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=3) The number of centers to generate, or the fixed center locations. cluster_std: float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box: pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. Examples -------- >>> from sklearn.datasets.samples_generator import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) See also -------- make_classification: a more intricate variant """ generator = check_random_state(random_state) if isinstance(centers, numbers.Integral): centers = generator.uniform(center_box[0], center_box[1], size=(centers, n_features)) else: centers = check_array(centers) n_features = centers.shape[1] if isinstance(cluster_std, numbers.Real): cluster_std = np.ones(len(centers)) * cluster_std X = [] y = [] n_centers = centers.shape[0] n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in range(n_samples % n_centers): n_samples_per_center[i] += 1 for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): X.append(centers[i] + generator.normal(scale=std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: indices = np.arange(n_samples) generator.shuffle(indices) X = X[indices] y = y[indices] return X, y def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None): """Generate the "Friedman \#1" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are independent features uniformly distributed on the interval [0, 1]. 
The output `y` is created according to the formula:: y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). Out of the `n_features` features, only 5 are actually used to compute `y`. The remaining features are independent of `y`. The number of features has to be >= 5. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. Should be at least 5. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ if n_features < 5: raise ValueError("n_features must be at least five.") generator = check_random_state(random_state) X = generator.rand(n_samples, n_features) y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples) return X, y def make_friedman2(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#2" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \ + noise * generator.randn(n_samples) return X, y def make_friedman3(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#3" regression problem This dataset is described in Friedman [1] and Breiman [2]. 
Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ / X[:, 0]) + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \ + noise * generator.randn(n_samples) return X, y def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10, tail_strength=0.5, random_state=None): """Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. effective_rank : int, optional (default=10) The approximate number of singular vectors required to explain most of the data by linear combinations. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The matrix. 
""" generator = check_random_state(random_state) n = min(n_samples, n_features) # Random (ortho normal) vectors u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic') v, _ = linalg.qr(generator.randn(n_features, n), mode='economic') # Index of the singular values singular_ind = np.arange(n, dtype=np.float64) # Build the singular profile by assembling signal and noise components low_rank = ((1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)) tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) s = np.identity(n) * (low_rank + tail) return np.dot(np.dot(u, s), v.T) def make_sparse_coded_signal(n_samples, n_components, n_features, n_nonzero_coefs, random_state=None): """Generate a signal as a sparse combination of dictionary elements. Returns a matrix Y = DX, such as D is (n_features, n_components), X is (n_components, n_samples) and each column of X has exactly n_nonzero_coefs non-zero elements. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int number of samples to generate n_components: int, number of components in the dictionary n_features : int number of features of the dataset to generate n_nonzero_coefs : int number of active (non-zero) coefficients in each sample random_state: int or RandomState instance, optional (default=None) seed used by the pseudo random number generator Returns ------- data: array of shape [n_features, n_samples] The encoded signal (Y). dictionary: array of shape [n_features, n_components] The dictionary with normalized components (D). code: array of shape [n_components, n_samples] The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). """ generator = check_random_state(random_state) # generate dictionary D = generator.randn(n_features, n_components) D /= np.sqrt(np.sum((D ** 2), axis=0)) # generate code X = np.zeros((n_components, n_samples)) for i in range(n_samples): idx = np.arange(n_components) generator.shuffle(idx) idx = idx[:n_nonzero_coefs] X[idx, i] = generator.randn(n_nonzero_coefs) # encode signal Y = np.dot(D, X) return map(np.squeeze, (Y, D, X)) def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None): """Generate a random regression problem with sparse uncorrelated design This dataset is described in Celeux et al [1]. as:: X ~ N(0, 1) y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] Only the first 4 features are informative. The remaining features are useless. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, "Regularization in regression: comparing Bayesian and frequentist methods in a poorly informative situation", 2009. 
""" generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), scale=np.ones(n_samples)) return X, y def make_spd_matrix(n_dim, random_state=None): """Generate a random symmetric, positive-definite matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_dim : int The matrix dimension. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_dim, n_dim] The random symmetric, positive-definite matrix. See also -------- make_sparse_spd_matrix """ generator = check_random_state(random_state) A = generator.rand(n_dim, n_dim) U, s, V = linalg.svd(np.dot(A.T, A)) X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V) return X def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False, smallest_coef=.1, largest_coef=.9, random_state=None): """Generate a sparse symmetric definite positive matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- dim: integer, optional (default=1) The size of the random matrix to generate. alpha: float between 0 and 1, optional (default=0.95) The probability that a coefficient is non zero (see notes). random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. largest_coef : float between 0 and 1, optional (default=0.9) The value of the largest coefficient. smallest_coef : float between 0 and 1, optional (default=0.1) The value of the smallest coefficient. norm_diag : boolean, optional (default=False) Whether to normalize the output matrix to make the leading diagonal elements all 1 Returns ------- prec : sparse matrix of shape (dim, dim) The generated matrix. Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. See also -------- make_spd_matrix """ random_state = check_random_state(random_state) chol = -np.eye(dim) aux = random_state.rand(dim, dim) aux[aux < alpha] = 0 aux[aux > alpha] = (smallest_coef + (largest_coef - smallest_coef) * random_state.rand(np.sum(aux > alpha))) aux = np.tril(aux, k=-1) # Permute the lines: we don't want to have asymmetries in the final # SPD matrix permutation = random_state.permutation(dim) aux = aux[permutation].T[permutation] chol += aux prec = np.dot(chol.T, chol) if norm_diag: # Form the diagonal vector into a row matrix d = np.diag(prec).reshape(1, prec.shape[0]) d = 1. / np.sqrt(d) prec *= d prec *= d.T return prec def make_swiss_roll(n_samples=100, noise=0.0, random_state=None): """Generate a swiss roll dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. Notes ----- The algorithm is from Marsland [1]. References ---------- .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", Chapter 10, 2009. http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py """ generator = check_random_state(random_state) t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples)) x = t * np.cos(t) y = 21 * generator.rand(1, n_samples) z = t * np.sin(t) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_s_curve(n_samples=100, noise=0.0, random_state=None): """Generate an S curve dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. """ generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_gaussian_quantiles(mean=None, cov=1., n_samples=100, n_features=2, n_classes=3, shuffle=True, random_state=None): """Generate isotropic Gaussian and label samples by quantile This classification dataset is constructed by taking a multi-dimensional standard normal distribution and defining classes separated by nested concentric multi-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- mean : array of shape [n_features], optional (default=None) The mean of the multi-dimensional normal distribution. If None then use the origin (0, 0, ...). cov : float, optional (default=1.) The covariance matrix will be this value times the unit matrix. This dataset only produces symmetric normal distributions. n_samples : int, optional (default=100) The total number of points equally divided among classes. n_features : int, optional (default=2) The number of features for each sample. n_classes : int, optional (default=3) The number of classes shuffle : boolean, optional (default=True) Shuffle the samples. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for quantile membership of each sample. Notes ----- The dataset is from Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ if n_samples < n_classes: raise ValueError("n_samples must be at least n_classes") generator = check_random_state(random_state) if mean is None: mean = np.zeros(n_features) else: mean = np.array(mean) # Build multivariate normal distribution X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) # Sort by distance from origin idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) X = X[idx, :] # Label by quantile step = n_samples // n_classes y = np.hstack([np.repeat(np.arange(n_classes), step), np.repeat(n_classes - 1, n_samples - step * n_classes)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) return X, y def _shuffle(data, random_state=None): generator = check_random_state(random_state) n_rows, n_cols = data.shape row_idx = generator.permutation(n_rows) col_idx = generator.permutation(n_cols) result = data[row_idx][:, col_idx] return result, row_idx, col_idx def make_biclusters(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with constant block diagonal structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer The number of biclusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and words using bipartite spectral graph partitioning. In Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 269-274). ACM. 
See also -------- make_checkerboard """ generator = check_random_state(random_state) n_rows, n_cols = shape consts = generator.uniform(minval, maxval, n_clusters) # row and column clusters of approximately equal sizes row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_clusters): selector = np.outer(row_labels == i, col_labels == i) result[selector] += consts[i] if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == c for c in range(n_clusters)) cols = np.vstack(col_labels == c for c in range(n_clusters)) return result, rows, cols def make_checkerboard(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with block checkerboard structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer or iterable (n_row_clusters, n_column_clusters) The number of row and column clusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). Spectral biclustering of microarray data: coclustering genes and conditions. Genome research, 13(4), 703-716. 
See also -------- make_biclusters """ generator = check_random_state(random_state) if hasattr(n_clusters, "__len__"): n_row_clusters, n_col_clusters = n_clusters else: n_row_clusters = n_col_clusters = n_clusters # row and column clusters of approximately equal sizes n_rows, n_cols = shape row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_row_clusters): for j in range(n_col_clusters): selector = np.outer(row_labels == i, col_labels == j) result[selector] += generator.uniform(minval, maxval) if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == label for label in range(n_row_clusters) for _ in range(n_col_clusters)) cols = np.vstack(col_labels == label for _ in range(n_row_clusters) for label in range(n_col_clusters)) return result, rows, cols
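# ---------------------------------------------------------------------------
# Editor's note (not part of the original module): a minimal usage sketch for
# a few of the generators defined above. It assumes the released scikit-learn
# package, so that the functions are importable from ``sklearn.datasets``.
if __name__ == "__main__":
    import numpy as np
    from sklearn.datasets import (make_swiss_roll, make_gaussian_quantiles,
                                  make_checkerboard)

    # 3-D manifold plus the univariate position of each sample along the roll.
    X_roll, t = make_swiss_roll(n_samples=500, noise=0.05, random_state=0)
    print(X_roll.shape, t.shape)                   # (500, 3) (500,)

    # Concentric-quantile classification data with three roughly equal classes.
    X_gq, y_gq = make_gaussian_quantiles(n_samples=300, n_features=2,
                                         n_classes=3, random_state=0)
    print(np.bincount(y_gq))                       # [100 100 100]

    # Checkerboard-structured matrix for biclustering, with row/column labels.
    data, rows, cols = make_checkerboard(shape=(30, 30), n_clusters=(3, 2),
                                         noise=1.0, random_state=0)
    print(data.shape, rows.shape, cols.shape)      # (30, 30) (6, 30) (6, 30)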
bsd-3-clause
astocko/statsmodels
examples/incomplete/dates.py
29
1251
""" Using dates with timeseries models """ import statsmodels.api as sm import pandas as pd # Getting started # --------------- data = sm.datasets.sunspots.load() # Right now an annual date series must be datetimes at the end of the year. dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog)) # Using Pandas # ------------ # Make a pandas TimeSeries or DataFrame endog = pd.TimeSeries(data.endog, index=dates) # and instantiate the model ar_model = sm.tsa.AR(endog, freq='A') pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1) # Let's do some out-of-sample prediction pred = pandas_ar_res.predict(start='2005', end='2015') print(pred) # Using explicit dates # -------------------- ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A') ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1) pred = ar_res.predict(start='2005', end='2015') print(pred) # This just returns a regular array, but since the model has date information # attached, you can get the prediction dates in a roundabout way. print(ar_res.data.predict_dates) # This attribute only exists if predict has been called. It holds the dates # associated with the last call to predict. #..TODO: should this be attached to the results instance?
bsd-3-clause
giorgiop/scikit-learn
examples/linear_model/plot_ridge_coeffs.py
146
2785
""" ============================================================== Plot Ridge coefficients as a function of the L2 regularization ============================================================== .. currentmodule:: sklearn.linear_model :class:`Ridge` Regression is the estimator used in this example. Each color in the left plot represents one different dimension of the coefficient vector, and this is displayed as a function of the regularization parameter. The right plot shows how exact the solution is. This example illustrates how a well defined solution is found by Ridge regression and how regularization affects the coefficients and their values. The plot on the right shows how the difference of the coefficients from the estimator changes as a function of regularization. In this example the dependent variable Y is set as a function of the input features: y = X*w + c. The coefficient vector w is randomly sampled from a normal distribution, whereas the bias term c is set to a constant. As alpha tends toward zero the coefficients found by Ridge regression stabilize towards the randomly sampled vector w. For big alpha (strong regularisation) the coefficients are smaller (eventually converging at 0) leading to a simpler and biased solution. These dependencies can be observed on the left plot. The right plot shows the mean squared error between the coefficients found by the model and the chosen vector w. Less regularised models retrieve the exact coefficients (error is equal to 0), stronger regularised models increase the error. Please note that in this example the data is non-noisy, hence it is possible to extract the exact coefficients. """ # Author: Kornel Kielczewski -- <kornel.k@plusnet.pl> print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_regression from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error clf = Ridge() X, y, w = make_regression(n_samples=10, n_features=10, coef=True, random_state=1, bias=3.5) coefs = [] errors = [] alphas = np.logspace(-6, 6, 200) # Train the model with different regularisation strengths for a in alphas: clf.set_params(alpha=a) clf.fit(X, y) coefs.append(clf.coef_) errors.append(mean_squared_error(clf.coef_, w)) # Display results plt.figure(figsize=(20, 6)) plt.subplot(121) ax = plt.gca() ax.plot(alphas, coefs) ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('weights') plt.title('Ridge coefficients as a function of the regularization') plt.axis('tight') plt.subplot(122) ax = plt.gca() ax.plot(alphas, errors) ax.set_xscale('log') plt.xlabel('alpha') plt.ylabel('error') plt.title('Coefficient error as a function of the regularization') plt.axis('tight') plt.show()
bsd-3-clause
theoryno3/scikit-learn
sklearn/neural_network/rbm.py
18
12245
"""Restricted Boltzmann Machine """ # Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca> # Vlad Niculae # Gabriel Synnaeve # Lars Buitinck # License: BSD 3 clause import time import numpy as np import scipy.sparse as sp from ..base import BaseEstimator from ..base import TransformerMixin from ..externals.six.moves import xrange from ..utils import check_array from ..utils import check_random_state from ..utils import gen_even_slices from ..utils import issparse from ..utils.extmath import safe_sparse_dot from ..utils.extmath import log_logistic from ..utils.fixes import expit # logistic function from ..utils.validation import check_is_fitted class BernoulliRBM(BaseEstimator, TransformerMixin): """Bernoulli Restricted Boltzmann Machine (RBM). A Restricted Boltzmann Machine with binary visible units and binary hiddens. Parameters are estimated using Stochastic Maximum Likelihood (SML), also known as Persistent Contrastive Divergence (PCD) [2]. The time complexity of this implementation is ``O(d ** 2)`` assuming d ~ n_features ~ n_components. Parameters ---------- n_components : int, optional Number of binary hidden units. learning_rate : float, optional The learning rate for weight updates. It is *highly* recommended to tune this hyper-parameter. Reasonable values are in the 10**[0., -3.] range. batch_size : int, optional Number of examples per minibatch. n_iter : int, optional Number of iterations/sweeps over the training dataset to perform during training. verbose : int, optional The verbosity level. The default, zero, means silent mode. random_state : integer or numpy.RandomState, optional A random number generator instance to define the state of the random permutations generator. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- intercept_hidden_ : array-like, shape (n_components,) Biases of the hidden units. intercept_visible_ : array-like, shape (n_features,) Biases of the visible units. components_ : array-like, shape (n_components, n_features) Weight matrix, where n_features in the number of visible units and n_components is the number of hidden units. Examples -------- >>> import numpy as np >>> from sklearn.neural_network import BernoulliRBM >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = BernoulliRBM(n_components=2) >>> model.fit(X) BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10, random_state=None, verbose=0) References ---------- [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for deep belief nets. Neural Computation 18, pp 1527-1554. http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf [2] Tieleman, T. Training Restricted Boltzmann Machines using Approximations to the Likelihood Gradient. International Conference on Machine Learning (ICML) 2008 """ def __init__(self, n_components=256, learning_rate=0.1, batch_size=10, n_iter=10, verbose=0, random_state=None): self.n_components = n_components self.learning_rate = learning_rate self.batch_size = batch_size self.n_iter = n_iter self.verbose = verbose self.random_state = random_state def transform(self, X): """Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) The data to be transformed. Returns ------- h : array, shape (n_samples, n_components) Latent representations of the data. 
""" check_is_fitted(self, "components_") X = check_array(X, accept_sparse='csr', dtype=np.float) return self._mean_hiddens(X) def _mean_hiddens(self, v): """Computes the probabilities P(h=1|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- h : array-like, shape (n_samples, n_components) Corresponding mean field values for the hidden layer. """ p = safe_sparse_dot(v, self.components_.T) p += self.intercept_hidden_ return expit(p, out=p) def _sample_hiddens(self, v, rng): """Sample from the distribution P(h|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState Random number generator to use. Returns ------- h : array-like, shape (n_samples, n_components) Values of the hidden layer. """ p = self._mean_hiddens(v) return (rng.random_sample(size=p.shape) < p) def _sample_visibles(self, h, rng): """Sample from the distribution P(v|h). Parameters ---------- h : array-like, shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState Random number generator to use. Returns ------- v : array-like, shape (n_samples, n_features) Values of the visible layer. """ p = np.dot(h, self.components_) p += self.intercept_visible_ expit(p, out=p) return (rng.random_sample(size=p.shape) < p) def _free_energy(self, v): """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : array-like, shape (n_samples,) The value of the free energy. """ return (- safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)) def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : array-like, shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self, "components_") if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_ def partial_fit(self, X, y=None): """Fit the model to the data X which should contain a partial segment of the data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) if not hasattr(self, 'components_'): self.components_ = np.asarray( self.random_state_.normal( 0, 0.01, (self.n_components, X.shape[1]) ), order='fortran') if not hasattr(self, 'intercept_hidden_'): self.intercept_hidden_ = np.zeros(self.n_components, ) if not hasattr(self, 'intercept_visible_'): self.intercept_visible_ = np.zeros(X.shape[1], ) if not hasattr(self, 'h_samples_'): self.h_samples_ = np.zeros((self.batch_size, self.n_components)) self._fit(X, self.random_state_) def _fit(self, v_pos, rng): """Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : array-like, shape (n_samples, n_features) The data to use for training. rng : RandomState Random number generator to use for sampling. 
""" h_pos = self._mean_hiddens(v_pos) v_neg = self._sample_visibles(self.h_samples_, rng) h_neg = self._mean_hiddens(v_neg) lr = float(self.learning_rate) / v_pos.shape[0] update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T update -= np.dot(h_neg.T, v_neg) self.components_ += lr * update self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) self.intercept_visible_ += lr * (np.asarray( v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)) h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial self.h_samples_ = np.floor(h_neg, h_neg) def score_samples(self, X): """Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : array-like, shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference. """ check_is_fitted(self, "components_") v = check_array(X, accept_sparse='csr') rng = check_random_state(self.random_state) # Randomly corrupt one feature in each sample in v. ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) if issparse(v): data = -2 * v[ind] + 1 v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) else: v_ = v.copy() v_[ind] = 1 - v_[ind] fe = self._free_energy(v) fe_ = self._free_energy(v_) return v.shape[1] * log_logistic(fe_ - fe) def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float) n_samples = X.shape[0] rng = check_random_state(self.random_state) self.components_ = np.asarray( rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='fortran') self.intercept_hidden_ = np.zeros(self.n_components, ) self.intercept_visible_ = np.zeros(X.shape[1], ) self.h_samples_ = np.zeros((self.batch_size, self.n_components)) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples)) verbose = self.verbose begin = time.time() for iteration in xrange(1, self.n_iter + 1): for batch_slice in batch_slices: self._fit(X[batch_slice], rng) if verbose: end = time.time() print("[%s] Iteration %d, pseudo-likelihood = %.2f," " time = %.2fs" % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin)) begin = end return self
bsd-3-clause
snap-stanford/ogb
examples/graphproppred/code2/utils.py
1
6992
import torch from collections import Counter import numpy as np import torch class ASTNodeEncoder(torch.nn.Module): ''' Input: x: default node feature. the first and second column represents node type and node attributes. depth: The depth of the node in the AST. Output: emb_dim-dimensional vector ''' def __init__(self, emb_dim, num_nodetypes, num_nodeattributes, max_depth): super(ASTNodeEncoder, self).__init__() self.max_depth = max_depth self.type_encoder = torch.nn.Embedding(num_nodetypes, emb_dim) self.attribute_encoder = torch.nn.Embedding(num_nodeattributes, emb_dim) self.depth_encoder = torch.nn.Embedding(self.max_depth + 1, emb_dim) def forward(self, x, depth): depth[depth > self.max_depth] = self.max_depth return self.type_encoder(x[:,0]) + self.attribute_encoder(x[:,1]) + self.depth_encoder(depth) def get_vocab_mapping(seq_list, num_vocab): ''' Input: seq_list: a list of sequences num_vocab: vocabulary size Output: vocab2idx: A dictionary that maps vocabulary into integer index. Additioanlly, we also index '__UNK__' and '__EOS__' '__UNK__' : out-of-vocabulary term '__EOS__' : end-of-sentence idx2vocab: A list that maps idx to actual vocabulary. ''' vocab_cnt = {} vocab_list = [] for seq in seq_list: for w in seq: if w in vocab_cnt: vocab_cnt[w] += 1 else: vocab_cnt[w] = 1 vocab_list.append(w) cnt_list = np.array([vocab_cnt[w] for w in vocab_list]) topvocab = np.argsort(-cnt_list, kind = 'stable')[:num_vocab] print('Coverage of top {} vocabulary:'.format(num_vocab)) print(float(np.sum(cnt_list[topvocab]))/np.sum(cnt_list)) vocab2idx = {vocab_list[vocab_idx]: idx for idx, vocab_idx in enumerate(topvocab)} idx2vocab = [vocab_list[vocab_idx] for vocab_idx in topvocab] # print(topvocab) # print([vocab_list[v] for v in topvocab[:10]]) # print([vocab_list[v] for v in topvocab[-10:]]) vocab2idx['__UNK__'] = num_vocab idx2vocab.append('__UNK__') vocab2idx['__EOS__'] = num_vocab + 1 idx2vocab.append('__EOS__') # test the correspondence between vocab2idx and idx2vocab for idx, vocab in enumerate(idx2vocab): assert(idx == vocab2idx[vocab]) # test that the idx of '__EOS__' is len(idx2vocab) - 1. # This fact will be used in decode_arr_to_seq, when finding __EOS__ assert(vocab2idx['__EOS__'] == len(idx2vocab) - 1) return vocab2idx, idx2vocab def augment_edge(data): ''' Input: data: PyG data object Output: data (edges are augmented in the following ways): data.edge_index: Added next-token edge. The inverse edges were also added. data.edge_attr (torch.Long): data.edge_attr[:,0]: whether it is AST edge (0) for next-token edge (1) data.edge_attr[:,1]: whether it is original direction (0) or inverse direction (1) ''' ##### AST edge edge_index_ast = data.edge_index edge_attr_ast = torch.zeros((edge_index_ast.size(1), 2)) ##### Inverse AST edge edge_index_ast_inverse = torch.stack([edge_index_ast[1], edge_index_ast[0]], dim = 0) edge_attr_ast_inverse = torch.cat([torch.zeros(edge_index_ast_inverse.size(1), 1), torch.ones(edge_index_ast_inverse.size(1), 1)], dim = 1) ##### Next-token edge ## Obtain attributed nodes and get their indices in dfs order # attributed_node_idx = torch.where(data.node_is_attributed.view(-1,) == 1)[0] # attributed_node_idx_in_dfs_order = attributed_node_idx[torch.argsort(data.node_dfs_order[attributed_node_idx].view(-1,))] ## Since the nodes are already sorted in dfs ordering in our case, we can just do the following. 
attributed_node_idx_in_dfs_order = torch.where(data.node_is_attributed.view(-1,) == 1)[0] ## build next token edge # Given: attributed_node_idx_in_dfs_order # [1, 3, 4, 5, 8, 9, 12] # Output: # [[1, 3, 4, 5, 8, 9] # [3, 4, 5, 8, 9, 12] edge_index_nextoken = torch.stack([attributed_node_idx_in_dfs_order[:-1], attributed_node_idx_in_dfs_order[1:]], dim = 0) edge_attr_nextoken = torch.cat([torch.ones(edge_index_nextoken.size(1), 1), torch.zeros(edge_index_nextoken.size(1), 1)], dim = 1) ##### Inverse next-token edge edge_index_nextoken_inverse = torch.stack([edge_index_nextoken[1], edge_index_nextoken[0]], dim = 0) edge_attr_nextoken_inverse = torch.ones((edge_index_nextoken.size(1), 2)) data.edge_index = torch.cat([edge_index_ast, edge_index_ast_inverse, edge_index_nextoken, edge_index_nextoken_inverse], dim = 1) data.edge_attr = torch.cat([edge_attr_ast, edge_attr_ast_inverse, edge_attr_nextoken, edge_attr_nextoken_inverse], dim = 0) return data def encode_y_to_arr(data, vocab2idx, max_seq_len): ''' Input: data: PyG graph object output: add y_arr to data ''' # PyG >= 1.5.0 seq = data.y # PyG = 1.4.3 # seq = data.y[0] data.y_arr = encode_seq_to_arr(seq, vocab2idx, max_seq_len) return data def encode_seq_to_arr(seq, vocab2idx, max_seq_len): ''' Input: seq: A list of words output: add y_arr (torch.Tensor) ''' augmented_seq = seq[:max_seq_len] + ['__EOS__'] * max(0, max_seq_len - len(seq)) return torch.tensor([[vocab2idx[w] if w in vocab2idx else vocab2idx['__UNK__'] for w in augmented_seq]], dtype = torch.long) def decode_arr_to_seq(arr, idx2vocab): ''' Input: torch 1d array: y_arr Output: a sequence of words. ''' eos_idx_list = torch.nonzero(arr == len(idx2vocab) - 1, as_tuple=False) # find the position of __EOS__ (the last vocab in idx2vocab) if len(eos_idx_list) > 0: clippted_arr = arr[: torch.min(eos_idx_list)] # find the smallest __EOS__ else: clippted_arr = arr return list(map(lambda x: idx2vocab[x], clippted_arr.cpu())) def test(): seq_list = [['a', 'b'], ['a', 'b', 'c', 'df', 'f', '2edea', 'a'], ['eraea', 'a', 'c'], ['d'], ['4rq4f','f','a','a', 'g']] vocab2idx, idx2vocab = get_vocab_mapping(seq_list, 4) print(vocab2idx) print(idx2vocab) print() assert(len(vocab2idx) == len(idx2vocab)) for vocab, idx in vocab2idx.items(): assert(idx2vocab[idx] == vocab) for seq in seq_list: print(seq) arr = encode_seq_to_arr(seq, vocab2idx, max_seq_len = 4)[0] # Test the effect of predicting __EOS__ # arr[2] = vocab2idx['__EOS__'] print(arr) seq_dec = decode_arr_to_seq(arr, idx2vocab) print(arr) print(seq_dec) print('') if __name__ == '__main__': test()
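# Editor's note (not part of the original module): a hedged sketch of
# ASTNodeEncoder on dummy tensors. The sizes below are illustrative only and
# assume a working torch installation; call _ast_node_encoder_demo() to run it.
def _ast_node_encoder_demo():
    enc = ASTNodeEncoder(emb_dim=8, num_nodetypes=5, num_nodeattributes=7,
                         max_depth=3)
    x = torch.tensor([[0, 1], [4, 6], [2, 0]])   # columns: node type, node attribute
    depth = torch.tensor([0, 2, 10])             # depths beyond max_depth get clipped
    emb = enc(x, depth)
    print(emb.shape)                             # torch.Size([3, 8])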
mit
pianomania/scikit-learn
sklearn/neural_network/tests/test_mlp.py
28
22183
""" Testing for Multi-layer Perceptron module (sklearn.neural_network) """ # Author: Issam H. Laradji # License: BSD 3 clause import sys import warnings import numpy as np from numpy.testing import assert_almost_equal, assert_array_equal from sklearn.datasets import load_digits, load_boston, load_iris from sklearn.datasets import make_regression, make_multilabel_classification from sklearn.exceptions import ConvergenceWarning from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.metrics import roc_auc_score from sklearn.neural_network import MLPClassifier from sklearn.neural_network import MLPRegressor from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import StandardScaler, MinMaxScaler from scipy.sparse import csr_matrix from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal, assert_false, ignore_warnings) from sklearn.utils.testing import assert_raise_message np.seterr(all='warn') ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"] digits_dataset_multi = load_digits(n_class=3) X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200]) y_digits_multi = digits_dataset_multi.target[:200] digits_dataset_binary = load_digits(n_class=2) X_digits_binary = MinMaxScaler().fit_transform( digits_dataset_binary.data[:200]) y_digits_binary = digits_dataset_binary.target[:200] classification_datasets = [(X_digits_multi, y_digits_multi), (X_digits_binary, y_digits_binary)] boston = load_boston() Xboston = StandardScaler().fit_transform(boston.data)[: 200] yboston = boston.target[:200] iris = load_iris() X_iris = iris.data y_iris = iris.target def test_alpha(): # Test that larger alpha yields weights closer to zero X = X_digits_binary[:100] y = y_digits_binary[:100] alpha_vectors = [] alpha_values = np.arange(2) absolute_sum = lambda x: np.sum(np.abs(x)) for alpha in alpha_values: mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1) with ignore_warnings(category=ConvergenceWarning): mlp.fit(X, y) alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]), absolute_sum(mlp.coefs_[1])])) for i in range(len(alpha_values) - 1): assert (alpha_vectors[i] > alpha_vectors[i + 1]).all() def test_fit(): # Test that the algorithm solution is equal to a worked out example. 
X = np.array([[0.6, 0.8, 0.7]]) y = np.array([0]) mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1, activation='logistic', random_state=1, max_iter=1, hidden_layer_sizes=2, momentum=0) # set weights mlp.coefs_ = [0] * 2 mlp.intercepts_ = [0] * 2 mlp.n_outputs_ = 1 mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]]) mlp.coefs_[1] = np.array([[0.1], [0.2]]) mlp.intercepts_[0] = np.array([0.1, 0.1]) mlp.intercepts_[1] = np.array([1.0]) mlp._coef_grads = [] * 2 mlp._intercept_grads = [] * 2 # Initialize parameters mlp.n_iter_ = 0 mlp.learning_rate_ = 0.1 # Compute the number of layers mlp.n_layers_ = 3 # Pre-allocate gradient matrices mlp._coef_grads = [0] * (mlp.n_layers_ - 1) mlp._intercept_grads = [0] * (mlp.n_layers_ - 1) mlp.out_activation_ = 'logistic' mlp.t_ = 0 mlp.best_loss_ = np.inf mlp.loss_curve_ = [] mlp._no_improvement_count = 0 mlp._intercept_velocity = [np.zeros_like(intercepts) for intercepts in mlp.intercepts_] mlp._coef_velocity = [np.zeros_like(coefs) for coefs in mlp.coefs_] mlp.partial_fit(X, y, classes=[0, 1]) # Manually worked out example # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1) # = 0.679178699175393 # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1) # = 0.574442516811659 # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1) # = 0.7654329236196236 # d21 = -(0 - 0.765) = 0.765 # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667 # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374 # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200 # W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244 # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336 # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992 # W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002 # W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244 # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294 # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911 # b1grad1 = d11 = 0.01667 # b1grad2 = d12 = 0.0374 # b2grad = d21 = 0.765 # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1], # [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992], # [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664, # 0.096008], [0.4939998, -0.002244]] # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 * # [[0.5294], [0.45911]] = [[0.04706], [0.154089]] # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374] # = [0.098333, 0.09626] # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235 assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756], [0.2956664, 0.096008], [0.4939998, -0.002244]]), decimal=3) assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]), decimal=3) assert_almost_equal(mlp.intercepts_[0], np.array([0.098333, 0.09626]), decimal=3) assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3) # Testing output # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 + # 0.7 * 0.4939998 + 0.098333) = 0.677 # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 + # 0.7 * -0.002244 + 0.09626) = 0.572 # o1 = h * W2 + b21 = 0.677 * 0.04706 + # 0.572 * 0.154089 + 0.9235 = 1.043 # prob = sigmoid(o1) = 0.739 assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3) def test_gradient(): # Test gradient. # This makes sure that the activation functions and their derivatives # are correct. 
The numerical and analytical computation of the gradient # should be close. for n_labels in [2, 3]: n_samples = 5 n_features = 10 X = np.random.random((n_samples, n_features)) y = 1 + np.mod(np.arange(n_samples) + 1, n_labels) Y = LabelBinarizer().fit_transform(y) for activation in ACTIVATION_TYPES: mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10, solver='lbfgs', alpha=1e-5, learning_rate_init=0.2, max_iter=1, random_state=1) mlp.fit(X, y) theta = np.hstack([l.ravel() for l in mlp.coefs_ + mlp.intercepts_]) layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] + [mlp.n_outputs_]) activations = [] deltas = [] coef_grads = [] intercept_grads = [] activations.append(X) for i in range(mlp.n_layers_ - 1): activations.append(np.empty((X.shape[0], layer_units[i + 1]))) deltas.append(np.empty((X.shape[0], layer_units[i + 1]))) fan_in = layer_units[i] fan_out = layer_units[i + 1] coef_grads.append(np.empty((fan_in, fan_out))) intercept_grads.append(np.empty(fan_out)) # analytically compute the gradients def loss_grad_fun(t): return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas, coef_grads, intercept_grads) [value, grad] = loss_grad_fun(theta) numgrad = np.zeros(np.size(theta)) n = np.size(theta, 0) E = np.eye(n) epsilon = 1e-5 # numerically compute the gradients for i in range(n): dtheta = E[:, i] * epsilon numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] - loss_grad_fun(theta - dtheta)[0]) / (epsilon * 2.0)) assert_almost_equal(numgrad, grad) def test_lbfgs_classification(): # Test lbfgs on classification. # It should achieve a score higher than 0.95 for the binary and multi-class # versions of the digits dataset. for X, y in classification_datasets: X_train = X[:150] y_train = y[:150] X_test = X[150:] expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind) for activation in ACTIVATION_TYPES: mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, max_iter=150, shuffle=True, random_state=1, activation=activation) mlp.fit(X_train, y_train) y_predict = mlp.predict(X_test) assert_greater(mlp.score(X_train, y_train), 0.95) assert_equal((y_predict.shape[0], y_predict.dtype.kind), expected_shape_dtype) def test_lbfgs_regression(): # Test lbfgs on the boston dataset, a regression problems. X = Xboston y = yboston for activation in ACTIVATION_TYPES: mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=150, shuffle=True, random_state=1, activation=activation) mlp.fit(X, y) if activation == 'identity': assert_greater(mlp.score(X, y), 0.84) else: # Non linear models perform much better than linear bottleneck: assert_greater(mlp.score(X, y), 0.95) def test_learning_rate_warmstart(): # Tests that warm_start reuse past solutions. X = [[3, 2], [1, 6], [5, 6], [-2, -4]] y = [1, 1, 1, 0] for learning_rate in ["invscaling", "constant"]: mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4, learning_rate=learning_rate, max_iter=1, power_t=0.25, warm_start=True) with ignore_warnings(category=ConvergenceWarning): mlp.fit(X, y) prev_eta = mlp._optimizer.learning_rate mlp.fit(X, y) post_eta = mlp._optimizer.learning_rate if learning_rate == 'constant': assert_equal(prev_eta, post_eta) elif learning_rate == 'invscaling': assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t), post_eta) def test_multilabel_classification(): # Test that multi-label classification works as expected. 
# test fit method X, y = make_multilabel_classification(n_samples=50, random_state=0, return_indicator=True) mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5, max_iter=150, random_state=0, activation='logistic', learning_rate_init=0.2) mlp.fit(X, y) assert_equal(mlp.score(X, y), 1) # test partial fit method mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150, random_state=0, activation='logistic', alpha=1e-5, learning_rate_init=0.2) for i in range(100): mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4]) assert_greater(mlp.score(X, y), 0.9) def test_multioutput_regression(): # Test that multi-output regression works as expected X, y = make_regression(n_samples=200, n_targets=5) mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200, random_state=1) mlp.fit(X, y) assert_greater(mlp.score(X, y), 0.9) def test_partial_fit_classes_error(): # Tests that passing different classes to partial_fit raises an error X = [[3, 2]] y = [0] clf = MLPClassifier(solver='sgd') clf.partial_fit(X, y, classes=[0, 1]) assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2]) def test_partial_fit_classification(): # Test partial_fit on classification. # `partial_fit` should yield the same results as 'fit' for binary and # multi-class classification. for X, y in classification_datasets: X = X y = y mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1, tol=0, alpha=1e-5, learning_rate_init=0.2) with ignore_warnings(category=ConvergenceWarning): mlp.fit(X, y) pred1 = mlp.predict(X) mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5, learning_rate_init=0.2) for i in range(100): mlp.partial_fit(X, y, classes=np.unique(y)) pred2 = mlp.predict(X) assert_array_equal(pred1, pred2) assert_greater(mlp.score(X, y), 0.95) def test_partial_fit_unseen_classes(): # Non regression test for bug 6994 # Tests for labeling errors in partial fit clf = MLPClassifier(random_state=0) clf.partial_fit([[1], [2], [3]], ["a", "b", "c"], classes=["a", "b", "c", "d"]) clf.partial_fit([[4]], ["d"]) assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0) def test_partial_fit_regression(): # Test partial_fit on regression. # `partial_fit` should yield the same results as 'fit' for regression. X = Xboston y = yboston for momentum in [0, .9]: mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu', random_state=1, learning_rate_init=0.01, batch_size=X.shape[0], momentum=momentum) with warnings.catch_warnings(record=True): # catch convergence warning mlp.fit(X, y) pred1 = mlp.predict(X) mlp = MLPRegressor(solver='sgd', activation='relu', learning_rate_init=0.01, random_state=1, batch_size=X.shape[0], momentum=momentum) for i in range(100): mlp.partial_fit(X, y) pred2 = mlp.predict(X) assert_almost_equal(pred1, pred2, decimal=2) score = mlp.score(X, y) assert_greater(score, 0.75) def test_partial_fit_errors(): # Test partial_fit error handling. 
X = [[3, 2], [1, 6]] y = [1, 0] # no classes passed assert_raises(ValueError, MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2]) # lbfgs doesn't support partial_fit assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit')) def test_params_errors(): # Test that invalid parameters raise value error X = [[3, 2], [1, 6]] y = [1, 0] clf = MLPClassifier assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y) assert_raises(ValueError, clf(max_iter=-1).fit, X, y) assert_raises(ValueError, clf(shuffle='true').fit, X, y) assert_raises(ValueError, clf(alpha=-1).fit, X, y) assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y) assert_raises(ValueError, clf(momentum=2).fit, X, y) assert_raises(ValueError, clf(momentum=-0.5).fit, X, y) assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y) assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y) assert_raises(ValueError, clf(validation_fraction=1).fit, X, y) assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y) assert_raises(ValueError, clf(beta_1=1).fit, X, y) assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y) assert_raises(ValueError, clf(beta_2=1).fit, X, y) assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y) assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y) assert_raises(ValueError, clf(solver='hadoken').fit, X, y) assert_raises(ValueError, clf(learning_rate='converge').fit, X, y) assert_raises(ValueError, clf(activation='cloak').fit, X, y) def test_predict_proba_binary(): # Test that predict_proba works as expected for binary class. X = X_digits_binary[:50] y = y_digits_binary[:50] clf = MLPClassifier(hidden_layer_sizes=5) with ignore_warnings(category=ConvergenceWarning): clf.fit(X, y) y_proba = clf.predict_proba(X) y_log_proba = clf.predict_log_proba(X) (n_samples, n_classes) = y.shape[0], 2 proba_max = y_proba.argmax(axis=1) proba_log_max = y_log_proba.argmax(axis=1) assert_equal(y_proba.shape, (n_samples, n_classes)) assert_array_equal(proba_max, proba_log_max) assert_array_equal(y_log_proba, np.log(y_proba)) assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0) def test_predict_proba_multiclass(): # Test that predict_proba works as expected for multi class. X = X_digits_multi[:10] y = y_digits_multi[:10] clf = MLPClassifier(hidden_layer_sizes=5) with ignore_warnings(category=ConvergenceWarning): clf.fit(X, y) y_proba = clf.predict_proba(X) y_log_proba = clf.predict_log_proba(X) (n_samples, n_classes) = y.shape[0], np.unique(y).size proba_max = y_proba.argmax(axis=1) proba_log_max = y_log_proba.argmax(axis=1) assert_equal(y_proba.shape, (n_samples, n_classes)) assert_array_equal(proba_max, proba_log_max) assert_array_equal(y_log_proba, np.log(y_proba)) def test_predict_proba_multilabel(): # Test that predict_proba works as expected for multilabel. 
# Multilabel should not use softmax which makes probabilities sum to 1 X, Y = make_multilabel_classification(n_samples=50, random_state=0, return_indicator=True) n_samples, n_classes = Y.shape clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30, random_state=0) clf.fit(X, Y) y_proba = clf.predict_proba(X) assert_equal(y_proba.shape, (n_samples, n_classes)) assert_array_equal(y_proba > 0.5, Y) y_log_proba = clf.predict_log_proba(X) proba_max = y_proba.argmax(axis=1) proba_log_max = y_log_proba.argmax(axis=1) assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10) assert_array_equal(proba_max, proba_log_max) assert_array_equal(y_log_proba, np.log(y_proba)) def test_sparse_matrices(): # Test that sparse and dense input matrices output the same results. X = X_digits_binary[:50] y = y_digits_binary[:50] X_sparse = csr_matrix(X) mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15, random_state=1) mlp.fit(X, y) pred1 = mlp.predict(X) mlp.fit(X_sparse, y) pred2 = mlp.predict(X_sparse) assert_almost_equal(pred1, pred2) pred1 = mlp.predict(X) pred2 = mlp.predict(X_sparse) assert_array_equal(pred1, pred2) def test_tolerance(): # Test tolerance. # It should force the solver to exit the loop when it converges. X = [[3, 2], [1, 6]] y = [1, 0] clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd') clf.fit(X, y) assert_greater(clf.max_iter, clf.n_iter_) def test_verbose_sgd(): # Test verbose. X = [[3, 2], [1, 6]] y = [1, 0] clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10, hidden_layer_sizes=2) old_stdout = sys.stdout sys.stdout = output = StringIO() with ignore_warnings(category=ConvergenceWarning): clf.fit(X, y) clf.partial_fit(X, y) sys.stdout = old_stdout assert 'Iteration' in output.getvalue() def test_early_stopping(): X = X_digits_binary[:100] y = y_digits_binary[:100] tol = 0.2 clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd', early_stopping=True) clf.fit(X, y) assert_greater(clf.max_iter, clf.n_iter_) valid_scores = clf.validation_scores_ best_valid_score = clf.best_validation_score_ assert_equal(max(valid_scores), best_valid_score) assert_greater(best_valid_score + tol, valid_scores[-2]) assert_greater(best_valid_score + tol, valid_scores[-1]) def test_adaptive_learning_rate(): X = [[3, 2], [1, 6]] y = [1, 0] clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd', learning_rate='adaptive') clf.fit(X, y) assert_greater(clf.max_iter, clf.n_iter_) assert_greater(1e-6, clf._optimizer.learning_rate) @ignore_warnings(RuntimeError) def test_warm_start(): X = X_iris y = y_iris y_2classes = np.array([0] * 75 + [1] * 75) y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70) y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50) y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38) y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30) # No error raised clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs', warm_start=True).fit(X, y) clf.fit(X, y) clf.fit(X, y_3classes) for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes): clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs', warm_start=True).fit(X, y) message = ('warm_start can only be used where `y` has the same ' 'classes as in the previous call to fit.' ' Previously got [0 1 2], `y` has %s' % np.unique(y_i)) assert_raise_message(ValueError, message, clf.fit, X, y_i)
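# Editor's note (not part of the original test module): a standalone
# illustration of the central-difference check used in test_gradient above,
# applied to a simple quadratic loss so the idea is visible without an MLP.
# It reuses the module-level ``np`` and ``assert_almost_equal`` imports.
def _finite_difference_gradient_demo():
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4)
    loss = lambda t: 0.5 * t.dot(A).dot(t)        # analytic grad: 0.5 * (A + A.T) @ t
    grad = lambda t: 0.5 * (A + A.T).dot(t)
    theta = rng.rand(4)
    eps = 1e-5
    numgrad = np.array([(loss(theta + eps * e) - loss(theta - eps * e)) / (2 * eps)
                        for e in np.eye(4)])
    assert_almost_equal(numgrad, grad(theta))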
bsd-3-clause
D-K-E/cltk
src/cltk/core/data_types.py
1
12523
"""Custom data types for the CLTK. These types form the building blocks of the NLP pipeline. >>> from cltk.core.data_types import Language >>> from cltk.core.data_types import Word >>> from cltk.core.data_types import Process >>> from cltk.core.data_types import Doc >>> from cltk.core.data_types import Pipeline """ import importlib from abc import ABC, abstractmethod from collections import defaultdict from dataclasses import dataclass, field from typing import Dict, List, Type, Union import numpy as np import stringcase as sc from cltk.morphology.morphosyntax import MorphosyntacticFeatureBundle from cltk.morphology.universal_dependencies_features import MorphosyntacticFeature ud_mod = importlib.import_module("cltk.morphology.universal_dependencies_features") @dataclass class Language: """For holding information about any given language. Used to encode data from ISO 639-3 and Glottolog at ``cltk.languages.glottolog.LANGUAGES``. May be extended by user for dialects or languages not documented by ISO 639-3. >>> from cltk.core.data_types import Language >>> from cltk.languages.utils import get_lang >>> lat = get_lang("lat") >>> isinstance(lat, Language) True >>> lat Language(name='Latin', glottolog_id='lati1261', latitude=41.9026, longitude=12.4502, dates=[], family_id='indo1319', parent_id='impe1234', level='language', iso_639_3_code='lat', type='a') """ name: str # Glottolog description glottolog_id: str latitude: float longitude: float dates: List[int] # add later; not available from Glottolog or ISO list family_id: str # from Glottolog parent_id: str # from Glottolog level: str # a language or a dialect iso_639_3_code: str type: str # "a" for ancient and "h" for historical; this from Glottolog @dataclass class Word: """Contains attributes of each processed word in a list of words. Designed to be used in the ``Doc.words`` dataclass. 
>>> from cltk.core.data_types import Word >>> from cltk.languages.example_texts import get_example_text >>> get_example_text("lat")[:25] 'Gallia est omnis divisa i' >>> from cltk.languages.utils import get_lang >>> lat = get_lang("lat") >>> Word(index_char_start=0, index_char_stop=6, index_token=0, string=get_example_text("lat")[0:6], pos="nom") Word(index_char_start=0, index_char_stop=6, index_token=0, index_sentence=None, string='Gallia', pos='nom', \ lemma=None, stem=None, scansion=None, xpos=None, upos=None, dependency_relation=None, governor=None, features={}, \ category={}, stop=None, named_entity=None, syllables=None, phonetic_transcription=None, definition=None) """ index_char_start: int = None index_char_stop: int = None index_token: int = None index_sentence: int = None string: str = None pos: str = None lemma: str = None stem: str = None scansion: str = None xpos: str = None # treebank-specific POS tag (from stanza) upos: str = None # universal POS tag (from stanza) dependency_relation: str = None # (from stanza) governor: int = None features: MorphosyntacticFeatureBundle = MorphosyntacticFeatureBundle() category: MorphosyntacticFeatureBundle = MorphosyntacticFeatureBundle() embedding: np.ndarray = field(repr=False, default=None) stop: bool = None named_entity: bool = None syllables: List[str] = None phonetic_transcription: str = None definition: str = None def __getitem__( self, feature_name: Union[str, Type[MorphosyntacticFeature]] ) -> List[MorphosyntacticFeature]: """Accessor to help get morphosyntatic features from a word object.""" return self.features[feature_name] def __getattr__(self, item: str): """Accessor to help get morphosyntatic features from a word object.""" feature_name = sc.pascalcase(item) if feature_name in ud_mod.__dict__: return self.features[feature_name] else: raise AttributeError(item) @dataclass class Sentence: """ The Data Container for sentences. """ words: List[Word] = None index: int = None embedding: np.ndarray = field(repr=False, default=None) def __getitem__(self, item: int) -> Word: """This indexing operation descends into the word list structure.""" return self.words[item] def __len__(self) -> int: """Returns the number of tokens in the sentence""" return len(self.words) @dataclass class Doc: """The object returned to the user from the ``NLP()`` class. Contains overall attributes of submitted texts, plus most importantly the processed tokenized text ``words``, being a list of ``Word`` types. 
>>> from cltk import NLP >>> from cltk.languages.example_texts import get_example_text >>> cltk_nlp = NLP(language="lat", suppress_banner=True) >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lat")) >>> cltk_doc.raw[:38] 'Gallia est omnis divisa in partes tres' >>> isinstance(cltk_doc.raw, str) True >>> cltk_doc.tokens[:10] ['Gallia', 'est', 'omnis', 'divisa', 'in', 'partes', 'tres', ',', 'quarum', 'unam'] >>> cltk_doc.tokens_stops_filtered[:10] ['Gallia', 'omnis', 'divisa', 'partes', 'tres', ',', 'incolunt', 'Belgae', ',', 'aliam'] >>> cltk_doc.pos[:3] ['NOUN', 'AUX', 'PRON'] >>> cltk_doc.morphosyntactic_features[:3] [{Case: [nominative], Degree: [positive], Gender: [feminine], Number: [singular]}, {Mood: [indicative], Number: [singular], Person: [third], Tense: [present], VerbForm: [finite], Voice: [active]}, {Case: [nominative], Degree: [positive], Gender: [feminine], Number: [singular], PrononimalType: [indefinite]}] >>> cltk_doc[0].gender [feminine] >>> cltk_doc[0]['Case'] [nominative] >>> cltk_doc.lemmata[:5] ['mallis', 'sum', 'omnis', 'divido', 'in'] >>> len(cltk_doc.sentences) 9 >>> len(cltk_doc.sentences[0]) 26 >>> type(cltk_doc.sentences[0][2]) <class 'cltk.core.data_types.Word'> >>> cltk_doc.sentences[0][2].string 'omnis' >>> len(cltk_doc.sentences_tokens) 9 >>> len(cltk_doc.sentences_tokens[0]) 26 >>> isinstance(cltk_doc.sentences_tokens[0][2], str) True >>> cltk_doc.sentences_tokens[0][2] 'omnis' >>> len(cltk_doc.sentences_strings) 9 >>> len(cltk_doc.sentences_strings[0]) 150 >>> isinstance(cltk_doc.sentences_strings[0], str) True >>> cltk_doc.sentences_strings[1] 'Hi omnes lingua , institutis , legibus inter se differunt .' >>> import numpy as np >>> isinstance(cltk_doc.embeddings[1], np.ndarray) True """ language: str = None words: List[Word] = None pipeline: "Pipeline" = None # Note: type should be ``Pipeline`` w/o quotes raw: str = None normalized_text: str = None embeddings_model = None sentence_embeddings: Dict[int, np.ndarray] = field(repr=False, default=None) @property def sentences(self) -> List[Sentence]: """Returns a list of ``Sentence``s, with each ``Sentence`` being a container for a list of ``Word`` objects.""" sents: Dict[int, List[Word]] = defaultdict(list) for word in self.words: sents[word.index_sentence].append(word) for key in sents: sents[key].sort(key=lambda x: x.index_token) # Sometimes not available, nor initialized; e.g. stanza if not self.sentence_embeddings: self.sentence_embeddings = dict() return [ Sentence(words=val, index=key, embedding=self.sentence_embeddings.get(key)) for key, val in sorted(sents.items(), key=lambda x: x[0]) ] @property def sentences_tokens(self) -> List[List[str]]: """Returns a list of lists, with the inner list being a list of word token strings. """ sentences_tokens: List[List[str]] = list() for sentence in self.sentences: sentence_tokens: List[str] = [word.string for word in sentence] sentences_tokens.append(sentence_tokens) return sentences_tokens @property def sentences_strings(self) -> List[str]: """Returns a list of strings, with each string being a sentence reconstructed from the word tokens. 
""" sentences_list: List[List[str]] = self.sentences_tokens sentences_str: List[str] = list() for sentence_tokens in sentences_list: # type: List[str] if self.language == "akk": # 'akk' produces List[Tuple[str, str]] sentence_tokens_str = " ".join( [tup[0] for tup in sentence_tokens] ) # type: str else: sentence_tokens_str: str = " ".join(sentence_tokens) sentences_str.append(sentence_tokens_str) return sentences_str def _get_words_attribute(self, attribute): return [getattr(word, attribute) for word in self.words] @property def tokens(self) -> List[str]: """Returns a list of string word tokens of all words in the doc.""" tokens = self._get_words_attribute("string") return tokens @property def tokens_stops_filtered( self, ) -> List[str]: """Returns a list of string word tokens of all words in the doc, but with stopwords removed. """ tokens = self._get_words_attribute("string") # type: List[str] # create equal-length list of True & False/None values is_token_stop = self._get_words_attribute("stop") # type: List[bool] # remove from the token list any who index in ``is_token_stop`` is True tokens_no_stops = [ token for index, token in enumerate(tokens) if not is_token_stop[index] ] # type: List[str] return tokens_no_stops @property def pos(self) -> List[str]: """Returns a list of the POS tags of all words in the doc.""" return self._get_words_attribute("upos") @property def morphosyntactic_features(self) -> List[Dict[str, str]]: """Returns a list of dictionaries containing the morphosyntactic features of each word (when available). Each dictionary specifies feature names as keys and feature values as values. """ return self._get_words_attribute("features") @property def lemmata(self) -> List[str]: """Returns a list of lemmata, indexed to the word tokens provided by `Doc.tokens`. """ return self._get_words_attribute("lemma") @property def stems(self) -> List[str]: """Returns a list of word stems, indexed to the word tokens provided by `Doc.tokens`. """ stems = self._get_words_attribute("stem") return stems def __getitem__(self, word_index: int) -> Word: """Indexing operator overloaded to return the `Word` at index `word_index`.""" return self.words[word_index] @property def embeddings(self): """Returns an embedding for each word. TODO: Consider option to use lemma """ return self._get_words_attribute("embedding") @dataclass class Process(ABC): """For each type of NLP process there needs to be a definition. It includes the type of data it expects (``str``, ``List[str]``, ``Word``, etc.) and what field within ``Word`` it will populate. This base class is intended to be inherited by NLP process types (e.g., ``TokenizationProcess`` or ``DependencyProcess``). """ language: str = None @abstractmethod def run(self, input_doc: Doc) -> Doc: pass @dataclass class Pipeline: """Abstract ``Pipeline`` class to be inherited. # TODO: Consider adding a Unicode normalization as a default first Process >>> from cltk.core.data_types import Process, Pipeline >>> from cltk.languages.utils import get_lang >>> from cltk.tokenizers import LatinTokenizationProcess >>> a_pipeline = Pipeline(description="A custom Latin pipeline", processes=[LatinTokenizationProcess], language=get_lang("lat")) >>> a_pipeline.description 'A custom Latin pipeline' >>> issubclass(a_pipeline.processes[0], Process) True """ description: str processes: List[Type[Process]] language: Language def add_process(self, process: Type[Process]): self.processes.append(process)
mit
pianomania/scikit-learn
examples/applications/plot_out_of_core_classification.py
51
13651
""" ====================================================== Out-of-core classification of text documents ====================================================== This is an example showing how scikit-learn can be used for classification using an out-of-core approach: learning from data that doesn't fit into main memory. We make use of an online classifier, i.e., one that supports the partial_fit method, that will be fed with batches of examples. To guarantee that the features space remains the same over time we leverage a HashingVectorizer that will project each example into the same feature space. This is especially useful in the case of text classification where new features (words) may appear in each batch. The dataset used in this example is Reuters-21578 as provided by the UCI ML repository. It will be automatically downloaded and uncompressed on first run. The plot represents the learning curve of the classifier: the evolution of classification accuracy over the course of the mini-batches. Accuracy is measured on the first 1000 samples, held out as a validation set. To limit the memory consumption, we queue examples up to a fixed amount before feeding them to the learner. """ # Authors: Eustache Diemert <eustache@diemert.fr> # @FedericoV <https://github.com/FedericoV/> # License: BSD 3 clause from __future__ import print_function from glob import glob import itertools import os.path import re import tarfile import time import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams from sklearn.externals.six.moves import html_parser from sklearn.externals.six.moves import urllib from sklearn.datasets import get_data_home from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import SGDClassifier from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import Perceptron from sklearn.naive_bayes import MultinomialNB def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() ############################################################################### # Reuters Dataset related routines # -------------------------------- # class ReutersParser(html_parser.HTMLParser): """Utility class to parse a SGML file and yield documents one at a time.""" def __init__(self, encoding='latin-1'): html_parser.HTMLParser.__init__(self) self._reset() self.encoding = encoding def handle_starttag(self, tag, attrs): method = 'start_' + tag getattr(self, method, lambda x: None)(attrs) def handle_endtag(self, tag): method = 'end_' + tag getattr(self, method, lambda: None)() def _reset(self): self.in_title = 0 self.in_body = 0 self.in_topics = 0 self.in_topic_d = 0 self.title = "" self.body = "" self.topics = [] self.topic_d = "" def parse(self, fd): self.docs = [] for chunk in fd: self.feed(chunk.decode(self.encoding)) for doc in self.docs: yield doc self.docs = [] self.close() def handle_data(self, data): if self.in_body: self.body += data elif self.in_title: self.title += data elif self.in_topic_d: self.topic_d += data def start_reuters(self, attributes): pass def end_reuters(self): self.body = re.sub(r'\s+', r' ', self.body) self.docs.append({'title': self.title, 'body': self.body, 'topics': self.topics}) self._reset() def start_title(self, attributes): self.in_title = 1 def end_title(self): self.in_title = 0 def start_body(self, attributes): self.in_body = 1 def end_body(self): self.in_body = 0 def start_topics(self, attributes): self.in_topics = 1 def end_topics(self): 
self.in_topics = 0 def start_d(self, attributes): self.in_topic_d = 1 def end_d(self): self.in_topic_d = 0 self.topics.append(self.topic_d) self.topic_d = "" def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. """ DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/' 'reuters21578-mld/reuters21578.tar.gz') ARCHIVE_FILENAME = 'reuters21578.tar.gz' if data_path is None: data_path = os.path.join(get_data_home(), "reuters") if not os.path.exists(data_path): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) os.mkdir(data_path) def progress(blocknum, bs, size): total_sz_mb = '%.2f MB' % (size / 1e6) current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6) if _not_in_sphinx(): print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb), end='') archive_path = os.path.join(data_path, ARCHIVE_FILENAME) urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): print('\r', end='') print("untarring Reuters dataset...") tarfile.open(archive_path, 'r:gz').extractall(data_path) print("done.") parser = ReutersParser() for filename in glob(os.path.join(data_path, "*.sgm")): for doc in parser.parse(open(filename, 'rb')): yield doc ############################################################################### # Main # ---- # # Create the vectorizer and limit the number of features to a reasonable # maximum vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18, non_negative=True) # Iterator over parsed Reuters SGML files. data_stream = stream_reuters_documents() # We learn a binary classification between the "acq" class and all the others. # "acq" was chosen as it is more or less evenly distributed in the Reuters # files. For other datasets, one should take care of creating a test set with # a realistic portion of positive instances. all_classes = np.array([0, 1]) positive_class = 'acq' # Here are some classifiers that support the `partial_fit` method partial_fit_classifiers = { 'SGD': SGDClassifier(), 'Perceptron': Perceptron(), 'NB Multinomial': MultinomialNB(alpha=0.01), 'Passive-Aggressive': PassiveAggressiveClassifier(), } def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: size is before excluding invalid docs with no topics assigned. 
""" data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics']) for doc in itertools.islice(doc_iter, size) if doc['topics']] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int) def iter_minibatches(doc_iter, minibatch_size): """Generator of minibatches.""" X_text, y = get_minibatch(doc_iter, minibatch_size) while len(X_text): yield X_text, y X_text, y = get_minibatch(doc_iter, minibatch_size) # test data statistics test_stats = {'n_test': 0, 'n_test_pos': 0} # First we hold out a number of examples to estimate accuracy n_test_documents = 1000 tick = time.time() X_test_text, y_test = get_minibatch(data_stream, 1000) parsing_time = time.time() - tick tick = time.time() X_test = vectorizer.transform(X_test_text) vectorizing_time = time.time() - tick test_stats['n_test'] += len(y_test) test_stats['n_test_pos'] += sum(y_test) print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test))) def progress(cls_name, stats): """Report progress information, return a string.""" duration = time.time() - stats['t0'] s = "%20s classifier : \t" % cls_name s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats s += "accuracy: %(accuracy).3f " % stats s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration) return s cls_stats = {} for cls_name in partial_fit_classifiers: stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(), 'runtime_history': [(0, 0)], 'total_fit_time': 0.0} cls_stats[cls_name] = stats get_minibatch(data_stream, n_test_documents) # Discard test set # We will feed the classifier with mini-batches of 1000 documents; this means # we have at most 1000 docs in memory at any time. The smaller the document # batch, the bigger the relative overhead of the partial fit methods. minibatch_size = 1000 # Create the data_stream that parses Reuters SGML files and iterates on # documents as a stream. 
minibatch_iterators = iter_minibatches(data_stream, minibatch_size) total_vect_time = 0.0 # Main loop : iterate on mini-batches of examples for i, (X_train_text, y_train) in enumerate(minibatch_iterators): tick = time.time() X_train = vectorizer.transform(X_train_text) total_vect_time += time.time() - tick for cls_name, cls in partial_fit_classifiers.items(): tick = time.time() # update estimator with examples in the current mini-batch cls.partial_fit(X_train, y_train, classes=all_classes) # accumulate test accuracy stats cls_stats[cls_name]['total_fit_time'] += time.time() - tick cls_stats[cls_name]['n_train'] += X_train.shape[0] cls_stats[cls_name]['n_train_pos'] += sum(y_train) tick = time.time() cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test) cls_stats[cls_name]['prediction_time'] = time.time() - tick acc_history = (cls_stats[cls_name]['accuracy'], cls_stats[cls_name]['n_train']) cls_stats[cls_name]['accuracy_history'].append(acc_history) run_history = (cls_stats[cls_name]['accuracy'], total_vect_time + cls_stats[cls_name]['total_fit_time']) cls_stats[cls_name]['runtime_history'].append(run_history) if i % 3 == 0: print(progress(cls_name, cls_stats[cls_name])) if i % 3 == 0: print('\n') ############################################################################### # Plot results # ------------ def plot_accuracy(x, y, x_legend): """Plot accuracy as a function of x.""" x = np.array(x) y = np.array(y) plt.title('Classification accuracy as a function of %s' % x_legend) plt.xlabel('%s' % x_legend) plt.ylabel('Accuracy') plt.grid(True) plt.plot(x, y) rcParams['legend.fontsize'] = 10 cls_names = list(sorted(cls_stats.keys())) # Plot accuracy evolution plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with #examples accuracy, n_examples = zip(*stats['accuracy_history']) plot_accuracy(n_examples, accuracy, "training examples (#)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with runtime accuracy, runtime = zip(*stats['runtime_history']) plot_accuracy(runtime, accuracy, 'runtime (s)') ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') # Plot fitting times plt.figure() fig = plt.gcf() cls_runtime = [] for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['total_fit_time']) cls_runtime.append(total_vect_time) cls_names.append('Vectorization') bar_colors = ['b', 'g', 'r', 'c', 'm', 'y'] ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=10) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Training Times') def autolabel(rectangles): """attach some text vi autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, '%.4f' % height, ha='center', va='bottom') autolabel(rectangles) plt.show() # Plot prediction times plt.figure() cls_runtime = [] cls_names = list(sorted(cls_stats.keys())) for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['prediction_time']) cls_runtime.append(parsing_time) cls_names.append('Read/Parse\n+Feat.Extr.') cls_runtime.append(vectorizing_time) cls_names.append('Hashing\n+Vect.') ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, 
color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=8) plt.setp(plt.xticks()[1], rotation=30) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Prediction Times (%d instances)' % n_test_documents) autolabel(rectangles) plt.show()
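# --- Editor's minimal sketch of the core out-of-core pattern ----------------
# Stripped of the benchmarking above, the essential loop is: hash each text
# mini-batch into a fixed-size feature space, then update the model with
# partial_fit. ``toy_batches`` is a hypothetical stand-in for any stream of
# (texts, labels) mini-batches; HashingVectorizer, SGDClassifier and np are
# already imported at the top of this script.
def toy_batches():
    yield (["acq: company agrees to be acquired", "grain exports fall"], [1, 0])

sketch_vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
sketch_clf = SGDClassifier()
for texts, labels in toy_batches():
    X_batch = sketch_vectorizer.transform(texts)  # stateless: same feature space
    sketch_clf.partial_fit(X_batch, labels, classes=np.array([0, 1]))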
bsd-3-clause
theoryno3/scikit-learn
examples/applications/plot_model_complexity_influence.py
320
6372
""" ========================== Model Complexity Influence ========================== Demonstrate how model complexity influences both prediction accuracy and computational performance. The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for regression (resp. classification). For each class of models we make the model complexity vary through the choice of relevant model parameters and measure the influence on both computational performance (latency) and predictive power (MSE or Hamming Loss). """ print(__doc__) # Author: Eustache Diemert <eustache@diemert.fr> # License: BSD 3 clause import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.parasite_axes import host_subplot from mpl_toolkits.axisartist.axislines import Axes from scipy.sparse.csr import csr_matrix from sklearn import datasets from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.svm.classes import NuSVR from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor from sklearn.linear_model.stochastic_gradient import SGDClassifier from sklearn.metrics import hamming_loss ############################################################################### # Routines # initialize random generator np.random.seed(0) def generate_data(case, sparse=False): """Generate regression/classification data.""" bunch = None if case == 'regression': bunch = datasets.load_boston() elif case == 'classification': bunch = datasets.fetch_20newsgroups_vectorized(subset='all') X, y = shuffle(bunch.data, bunch.target) offset = int(X.shape[0] * 0.8) X_train, y_train = X[:offset], y[:offset] X_test, y_test = X[offset:], y[offset:] if sparse: X_train = csr_matrix(X_train) X_test = csr_matrix(X_test) else: X_train = np.array(X_train) X_test = np.array(X_test) y_test = np.array(y_test) y_train = np.array(y_train) data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test} return data def benchmark_influence(conf): """ Benchmark influence of :changing_param: on both MSE and latency. """ prediction_times = [] prediction_powers = [] complexities = [] for param_value in conf['changing_param_values']: conf['tuned_params'][conf['changing_param']] = param_value estimator = conf['estimator'](**conf['tuned_params']) print("Benchmarking %s" % estimator) estimator.fit(conf['data']['X_train'], conf['data']['y_train']) conf['postfit_hook'](estimator) complexity = conf['complexity_computer'](estimator) complexities.append(complexity) start_time = time.time() for _ in range(conf['n_samples']): y_pred = estimator.predict(conf['data']['X_test']) elapsed_time = (time.time() - start_time) / float(conf['n_samples']) prediction_times.append(elapsed_time) pred_score = conf['prediction_performance_computer']( conf['data']['y_test'], y_pred) prediction_powers.append(pred_score) print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % ( complexity, conf['prediction_performance_label'], pred_score, elapsed_time)) return prediction_powers, prediction_times, complexities def plot_influence(conf, mse_values, prediction_times, complexities): """ Plot influence of model complexity on both accuracy and latency. 
""" plt.figure(figsize=(12, 6)) host = host_subplot(111, axes_class=Axes) plt.subplots_adjust(right=0.75) par1 = host.twinx() host.set_xlabel('Model Complexity (%s)' % conf['complexity_label']) y1_label = conf['prediction_performance_label'] y2_label = "Time (s)" host.set_ylabel(y1_label) par1.set_ylabel(y2_label) p1, = host.plot(complexities, mse_values, 'b-', label="prediction error") p2, = par1.plot(complexities, prediction_times, 'r-', label="latency") host.legend(loc='upper right') host.axis["left"].label.set_color(p1.get_color()) par1.axis["right"].label.set_color(p2.get_color()) plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__) plt.show() def _count_nonzero_coefficients(estimator): a = estimator.coef_.toarray() return np.count_nonzero(a) ############################################################################### # main code regression_data = generate_data('regression') classification_data = generate_data('classification', sparse=True) configurations = [ {'estimator': SGDClassifier, 'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss': 'modified_huber', 'fit_intercept': True}, 'changing_param': 'l1_ratio', 'changing_param_values': [0.25, 0.5, 0.75, 0.9], 'complexity_label': 'non_zero coefficients', 'complexity_computer': _count_nonzero_coefficients, 'prediction_performance_computer': hamming_loss, 'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)', 'postfit_hook': lambda x: x.sparsify(), 'data': classification_data, 'n_samples': 30}, {'estimator': NuSVR, 'tuned_params': {'C': 1e3, 'gamma': 2 ** -15}, 'changing_param': 'nu', 'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9], 'complexity_label': 'n_support_vectors', 'complexity_computer': lambda x: len(x.support_vectors_), 'data': regression_data, 'postfit_hook': lambda x: x, 'prediction_performance_computer': mean_squared_error, 'prediction_performance_label': 'MSE', 'n_samples': 30}, {'estimator': GradientBoostingRegressor, 'tuned_params': {'loss': 'ls'}, 'changing_param': 'n_estimators', 'changing_param_values': [10, 50, 100, 200, 500], 'complexity_label': 'n_trees', 'complexity_computer': lambda x: x.n_estimators, 'data': regression_data, 'postfit_hook': lambda x: x, 'prediction_performance_computer': mean_squared_error, 'prediction_performance_label': 'MSE', 'n_samples': 30}, ] for conf in configurations: prediction_performances, prediction_times, complexities = \ benchmark_influence(conf) plot_influence(conf, prediction_performances, prediction_times, complexities)
bsd-3-clause
stefrobb/namebench
nb_third_party/dns/rdtypes/ANY/CNAME.py
248
1092
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.rdtypes.nsbase


class CNAME(dns.rdtypes.nsbase.NSBase):

    """CNAME record

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""

    pass
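# --- Editor's usage sketch (not part of this module) ------------------------
# How a CNAME rdata is typically built and inspected through dnspython's
# generic rdata factory; 'target.example.' is an arbitrary illustrative name.
import dns.rdata
import dns.rdataclass
import dns.rdatatype

cname_rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
                                  'target.example.')
print(cname_rdata.target)    # the canonical name, inherited from NSBase
print(cname_rdata.to_text())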
apache-2.0
jschavem/facial-expression-classification
source/classification/reference/cross_validate.py
1
1910
from sklearn import cross_validation from sklearn.svm import SVC from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.metrics import accuracy_score import numpy as np import csv import glob from sklearn import datasets iris = datasets.load_iris() def c_validate(filename): data = [] with open(filename, 'rb') as csvfile: for idx, row in enumerate(csv.reader(csvfile, delimiter=',', quotechar='"')): if idx == 0 and len(data) > 0: continue data.append(row) print "Attribute %s" % data[0][0] X = np.array([row[3:] for row in data[1:]]) y = np.array([row[0] for row in data[1:]]) print("Loaded %d sessions and %d truthy values for %s" % (len(X), len(y), filename)) leave_one_out = cross_validation.LeaveOneOut(len(y)) act = [] pred = [] for train_index, test_index in leave_one_out: X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] clf = SVC() clf.fit(X_train, y_train) p = clf.predict(X_test) act.append(y_test[0]) pred.append(p[0]) #print "%s, predicted %s" % (y_test[0], y_pred[0]) return act, pred num_test_files = 35 dataset = 'koelstra-approach' actual = [] predicted = [] for idx, filename in enumerate(glob.glob('../output/%s/*.csv' % dataset)): if idx == num_test_files: break a, p = c_validate(filename) actual += a predicted += p conf_matrix = confusion_matrix(actual, predicted, ['low', 'high']) print conf_matrix print "" scores = f1_score(actual, predicted, ['low', 'high'], 'low', average=None) class_counts = [sum(row) for row in conf_matrix] average_f1 = np.average(scores, weights=class_counts) print "Average F1 score: %.3f" % average_f1 print "Average accuracy: %.3f" % accuracy_score(actual, predicted)
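# --- Editor's sketch: the same evaluation with the current sklearn API ------
# The script above targets the long-removed ``sklearn.cross_validation``
# module and Python 2 ``print`` statements. The hypothetical helper below
# shows an equivalent leave-one-out SVC evaluation against the modern
# ``sklearn.model_selection`` API; it expects the X/y arrays built inside
# ``c_validate``. SVC and the metric functions are imported at the top of
# this script.
def c_validate_modern(X, y):
    from sklearn.model_selection import LeaveOneOut, cross_val_predict
    predicted_loo = cross_val_predict(SVC(), X, y, cv=LeaveOneOut())
    print(confusion_matrix(y, predicted_loo, labels=['low', 'high']))
    print("Weighted F1: %.3f" % f1_score(y, predicted_loo, average='weighted'))
    print("Accuracy: %.3f" % accuracy_score(y, predicted_loo))
    return predicted_loo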
mit
ray-project/ray
rllib/core/rl_module/rl_module.py
1
8609
import abc from typing import Mapping, Any, Type, TYPE_CHECKING if TYPE_CHECKING: from ray.rllib.core.rl_module.marl_module import MultiAgentRLModule from ray.rllib.utils.annotations import ( ExperimentalAPI, OverrideToImplementCustomLogic, OverrideToImplementCustomLogic_CallToSuperRecommended, ) from ray.rllib.models.specs.specs_dict import ModelSpec, check_specs from ray.rllib.models.distributions import Distribution from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.typing import SampleBatchType ModuleID = str @ExperimentalAPI class RLModule(abc.ABC): """Base class for RLlib modules. Here is the pseudo code for how the forward methods are called: # During Training (acting in env from each rollout worker) ---------------------------------------------------------- .. code-block:: python module: RLModule = ... obs = env.reset() while not done: fwd_outputs = module.forward_exploration({"obs": obs}) # this can be deterministic or stochastic exploration action = fwd_outputs["action_dist"].sample() next_obs, reward, done, info = env.step(action) buffer.add(obs, action, next_obs, reward, done, info) next_obs = obs # During Training (learning the policy) ---------------------------------------------------------- .. code-block:: python module: RLModule = ... fwd_ins = buffer.sample() fwd_outputs = module.forward_train(fwd_ins) loss = compute_loss(fwd_outputs, fwd_ins) update_params(module, loss) # During Inference (acting in env during evaluation) ---------------------------------------------------------- .. code-block:: python module: RLModule = ... obs = env.reset() while not done: fwd_outputs = module.forward_inference({"obs": obs}) # this can be deterministic or stochastic evaluation action = fwd_outputs["action_dist"].sample() next_obs, reward, done, info = env.step(action) next_obs = obs Args: config: The config object for the module. Abstract Methods: forward_train: Forward pass during training. forward_exploration: Forward pass during training for exploration. forward_inference: Forward pass during inference. Note: There is a reason that the specs are not written as abstract properties. The reason is that torch overrides `__getattr__` and `__setattr__`. This means that if we define the specs as properties, then any error in the property will be interpreted as a failure to retrieve the attribute and will invoke `__getattr__` which will give a confusing error about the attribute not found. More details here: https://github.com/pytorch/pytorch/issues/49726. """ @OverrideToImplementCustomLogic_CallToSuperRecommended def __init__(self, config: Mapping[str, Any] = None) -> None: self.config = config or {} @OverrideToImplementCustomLogic_CallToSuperRecommended def output_specs_inference(self) -> ModelSpec: """Returns the output specs of the forward_inference method. Override this method to customize the output specs of the inference call. The default implementation requires the forward_inference to reutn a dict that has `action_dist` key and its value is an instance of `Distribution`. This assumption must always hold. """ return ModelSpec({"action_dist": Distribution}) @OverrideToImplementCustomLogic_CallToSuperRecommended def output_specs_exploration(self) -> ModelSpec: """Returns the output specs of the forward_exploration method. Override this method to customize the output specs of the inference call. 
The default implementation requires the forward_exploration to reutn a dict that has `action_dist` key and its value is an instance of `Distribution`. This assumption must always hold. """ return ModelSpec({"action_dist": Distribution}) def output_specs_train(self) -> ModelSpec: """Returns the output specs of the forward_train method.""" return ModelSpec() def input_specs_inference(self) -> ModelSpec: """Returns the input specs of the forward_inference method.""" return ModelSpec() def input_specs_exploration(self) -> ModelSpec: """Returns the input specs of the forward_exploration method.""" return ModelSpec() def input_specs_train(self) -> ModelSpec: """Returns the input specs of the forward_train method.""" return ModelSpec() @check_specs( input_spec="input_specs_inference", output_spec="output_specs_inference" ) def forward_inference(self, batch: SampleBatchType, **kwargs) -> Mapping[str, Any]: """Forward-pass during evaluation, called from the sampler. This method should not be overriden. Instead, override the _forward_inference method. Args: batch: The input batch. This input batch should comply with input_specs_inference(). **kwargs: Additional keyword arguments. Returns: The output of the forward pass. This output should comply with the ouptut_specs_inference(). """ return self._forward_inference(batch, **kwargs) @abc.abstractmethod def _forward_inference(self, batch: NestedDict, **kwargs) -> Mapping[str, Any]: """Forward-pass during evaluation. See forward_inference for details.""" @check_specs( input_spec="input_specs_exploration", output_spec="output_specs_exploration" ) def forward_exploration( self, batch: SampleBatchType, **kwargs ) -> Mapping[str, Any]: """Forward-pass during exploration, called from the sampler. This method should not be overriden. Instead, override the _forward_exploration method. Args: batch: The input batch. This input batch should comply with input_specs_exploration(). **kwargs: Additional keyword arguments. Returns: The output of the forward pass. This output should comply with the ouptut_specs_exploration(). """ return self._forward_exploration(batch, **kwargs) @abc.abstractmethod def _forward_exploration(self, batch: NestedDict, **kwargs) -> Mapping[str, Any]: """Forward-pass during exploration. See forward_exploration for details.""" @check_specs(input_spec="input_specs_train", output_spec="output_specs_train") def forward_train(self, batch: SampleBatchType, **kwargs) -> Mapping[str, Any]: """Forward-pass during training called from the trainer. This method should not be overriden. Instead, override the _forward_train method. Args: batch: The input batch. This input batch should comply with input_specs_train(). **kwargs: Additional keyword arguments. Returns: The output of the forward pass. This output should comply with the ouptut_specs_train(). """ return self._forward_train(batch, **kwargs) @abc.abstractmethod def _forward_train(self, batch: NestedDict, **kwargs) -> Mapping[str, Any]: """Forward-pass during training. 
        See forward_train for details."""

    @abc.abstractmethod
    def get_state(self) -> Mapping[str, Any]:
        """Returns the state dict of the module."""

    @abc.abstractmethod
    def set_state(self, state_dict: Mapping[str, Any]) -> None:
        """Sets the state dict of the module."""

    @abc.abstractmethod
    def make_distributed(self, dist_config: Mapping[str, Any] = None) -> None:
        """Reserved API. Makes the module distributed."""

    @abc.abstractmethod
    def is_distributed(self) -> bool:
        """Reserved API. Returns True if the module is distributed."""

    def as_multi_agent(self) -> "MultiAgentRLModule":
        """Returns a multi-agent wrapper around this module."""
        return self.get_multi_agent_class()({DEFAULT_POLICY_ID: self})

    @classmethod
    @OverrideToImplementCustomLogic
    def get_multi_agent_class(cls) -> Type["MultiAgentRLModule"]:
        """Returns the multi-agent wrapper class for this module."""
        from ray.rllib.core.rl_module.marl_module import MultiAgentRLModule

        return MultiAgentRLModule
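# --- Editor's illustrative sketch (hypothetical, not part of RLlib) ---------
# A deliberately trivial concrete subclass, only to show which members the
# abstract interface above requires. ``DummyRLModule`` is an invented name;
# the empty ``ModelSpec`` overrides lift the default "action_dist"
# requirement so the toy forward passes can return empty dicts. RLModule,
# ModelSpec, Mapping and Any are already in scope in this module.
class DummyRLModule(RLModule):
    def output_specs_inference(self) -> ModelSpec:
        return ModelSpec()  # no output constraints for this toy module

    def output_specs_exploration(self) -> ModelSpec:
        return ModelSpec()

    def _forward_inference(self, batch, **kwargs) -> Mapping[str, Any]:
        return {}

    def _forward_exploration(self, batch, **kwargs) -> Mapping[str, Any]:
        return {}

    def _forward_train(self, batch, **kwargs) -> Mapping[str, Any]:
        return {}

    def get_state(self) -> Mapping[str, Any]:
        return {}

    def set_state(self, state_dict: Mapping[str, Any]) -> None:
        pass

    def make_distributed(self, dist_config: Mapping[str, Any] = None) -> None:
        pass

    def is_distributed(self) -> bool:
        return False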
apache-2.0
cainiaocome/scikit-learn
sklearn/linear_model/least_angle.py
42
49357
""" Least Angle Regression algorithm. See the documentation on the Generalized Linear Model for a complete discussion. """ from __future__ import print_function # Author: Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux # # License: BSD 3 clause from math import log import sys import warnings from distutils.version import LooseVersion import numpy as np from scipy import linalg, interpolate from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel from ..base import RegressorMixin from ..utils import arrayfuncs, as_float_array, check_X_y from ..cross_validation import check_cv from ..utils import ConvergenceWarning from ..externals.joblib import Parallel, delayed from ..externals.six.moves import xrange import scipy solve_triangular_args = {} if LooseVersion(scipy.__version__) >= LooseVersion('0.12'): solve_triangular_args = {'check_finite': False} def lars_path(X, y, Xy=None, Gram=None, max_iter=500, alpha_min=0, method='lar', copy_X=True, eps=np.finfo(np.float).eps, copy_Gram=True, verbose=0, return_path=True, return_n_iter=False): """Compute Least Angle Regression or Lasso path using LARS algorithm [1] The optimization objective for the case method='lasso' is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 in the case of method='lars', the objective function is only known in the form of an implicit equation (see discussion in [1]) Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ----------- X : array, shape: (n_samples, n_features) Input data. y : array, shape: (n_samples) Input targets. max_iter : integer, optional (default=500) Maximum number of iterations to perform, set to infinity for no limit. Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features. alpha_min : float, optional (default=0) Minimum correlation along the path. It corresponds to the regularization parameter alpha parameter in the Lasso. method : {'lar', 'lasso'}, optional (default='lar') Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. eps : float, optional (default=``np.finfo(np.float).eps``) The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : bool, optional (default=True) If ``False``, ``X`` is overwritten. copy_Gram : bool, optional (default=True) If ``False``, ``Gram`` is overwritten. verbose : int (default=0) Controls output verbosity. return_path : bool, optional (default=True) If ``return_path==True`` returns the entire path, else returns only the last point of the path. return_n_iter : bool, optional (default=False) Whether to return the number of iterations. Returns -------- alphas : array, shape: [n_alphas + 1] Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter``, ``n_features`` or the number of nodes in the path with ``alpha >= alpha_min``, whichever is smaller. active : array, shape [n_alphas] Indices of active variables at the end of the path. coefs : array, shape (n_features, n_alphas + 1) Coefficients along the path n_iter : int Number of iterations run. Returned only if return_n_iter is set to True. See also -------- lasso_path LassoLars Lars LassoLarsCV LarsCV sklearn.decomposition.sparse_encode References ---------- .. 
[1] "Least Angle Regression", Effron et al. http://www-stat.stanford.edu/~tibs/ftp/lars.pdf .. [2] `Wikipedia entry on the Least-angle regression <http://en.wikipedia.org/wiki/Least-angle_regression>`_ .. [3] `Wikipedia entry on the Lasso <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_ """ n_features = X.shape[1] n_samples = y.size max_features = min(max_iter, n_features) if return_path: coefs = np.zeros((max_features + 1, n_features)) alphas = np.zeros(max_features + 1) else: coef, prev_coef = np.zeros(n_features), np.zeros(n_features) alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas? n_iter, n_active = 0, 0 active, indices = list(), np.arange(n_features) # holds the sign of covariance sign_active = np.empty(max_features, dtype=np.int8) drop = False # will hold the cholesky factorization. Only lower part is # referenced. # We are initializing this to "zeros" and not empty, because # it is passed to scipy linalg functions and thus if it has NaNs, # even if they are in the upper part that it not used, we # get errors raised. # Once we support only scipy > 0.12 we can use check_finite=False and # go back to "empty" L = np.zeros((max_features, max_features), dtype=X.dtype) swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,)) solve_cholesky, = get_lapack_funcs(('potrs',), (X,)) if Gram is None: if copy_X: # force copy. setting the array to be fortran-ordered # speeds up the calculation of the (partial) Gram matrix # and allows to easily swap columns X = X.copy('F') elif Gram == 'auto': Gram = None if X.shape[0] > X.shape[1]: Gram = np.dot(X.T, X) elif copy_Gram: Gram = Gram.copy() if Xy is None: Cov = np.dot(X.T, y) else: Cov = Xy.copy() if verbose: if verbose > 1: print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") else: sys.stdout.write('.') sys.stdout.flush() tiny = np.finfo(np.float).tiny # to avoid division by 0 warning tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning equality_tolerance = np.finfo(np.float32).eps while True: if Cov.size: C_idx = np.argmax(np.abs(Cov)) C_ = Cov[C_idx] C = np.fabs(C_) else: C = 0. if return_path: alpha = alphas[n_iter, np.newaxis] coef = coefs[n_iter] prev_alpha = alphas[n_iter - 1, np.newaxis] prev_coef = coefs[n_iter - 1] alpha[0] = C / n_samples if alpha[0] <= alpha_min + equality_tolerance: # early stopping if abs(alpha[0] - alpha_min) > equality_tolerance: # interpolation factor 0 <= ss < 1 if n_iter > 0: # In the first iteration, all alphas are zero, the formula # below would make ss a NaN ss = ((prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])) coef[:] = prev_coef + ss * (coef - prev_coef) alpha[0] = alpha_min if return_path: coefs[n_iter] = coef break if n_iter >= max_iter or n_active >= n_features: break if not drop: ########################################################## # Append x_j to the Cholesky factorization of (Xa * Xa') # # # # ( L 0 ) # # L -> ( ) , where L * w = Xa' x_j # # ( w z ) and z = ||x_j|| # # # ########################################################## sign_active[n_active] = np.sign(C_) m, n = n_active, C_idx + n_active Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) indices[n], indices[m] = indices[m], indices[n] Cov_not_shortened = Cov Cov = Cov[1:] # remove Cov[0] if Gram is None: X.T[n], X.T[m] = swap(X.T[n], X.T[m]) c = nrm2(X.T[n_active]) ** 2 L[n_active, :n_active] = \ np.dot(X.T[n_active], X.T[:n_active].T) else: # swap does only work inplace if matrix is fortran # contiguous ... 
Gram[m], Gram[n] = swap(Gram[m], Gram[n]) Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) c = Gram[n_active, n_active] L[n_active, :n_active] = Gram[n_active, :n_active] # Update the cholesky decomposition for the Gram matrix if n_active: linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) diag = max(np.sqrt(np.abs(c - v)), eps) L[n_active, n_active] = diag if diag < 1e-7: # The system is becoming too ill-conditioned. # We have degenerate vectors in our active set. # We'll 'drop for good' the last regressor added. # Note: this case is very rare. It is no longer triggered by the # test suite. The `equality_tolerance` margin added in 0.16.0 to # get early stopping to work consistently on all versions of # Python including 32 bit Python under Windows seems to make it # very difficult to trigger the 'drop for good' strategy. warnings.warn('Regressors in active set degenerate. ' 'Dropping a regressor, after %i iterations, ' 'i.e. alpha=%.3e, ' 'with an active set of %i regressors, and ' 'the smallest cholesky pivot element being %.3e' % (n_iter, alpha, n_active, diag), ConvergenceWarning) # XXX: need to figure a 'drop for good' way Cov = Cov_not_shortened Cov[0] = 0 Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) continue active.append(indices[n_active]) n_active += 1 if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '', n_active, C)) if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]: # alpha is increasing. This is because the updates of Cov are # bringing in too much numerical error that is greater than # than the remaining correlation with the # regressors. Time to bail out warnings.warn('Early stopping the lars path, as the residues ' 'are small and the current value of alpha is no ' 'longer well controlled. %i iterations, alpha=%.3e, ' 'previous alpha=%.3e, with an active set of %i ' 'regressors.' % (n_iter, alpha, prev_alpha, n_active), ConvergenceWarning) break # least squares solution least_squares, info = solve_cholesky(L[:n_active, :n_active], sign_active[:n_active], lower=True) if least_squares.size == 1 and least_squares == 0: # This happens because sign_active[:n_active] = 0 least_squares[...] = 1 AA = 1. else: # is this really needed ? AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active])) if not np.isfinite(AA): # L is too ill-conditioned i = 0 L_ = L[:n_active, :n_active].copy() while not np.isfinite(AA): L_.flat[::n_active + 1] += (2 ** i) * eps least_squares, info = solve_cholesky( L_, sign_active[:n_active], lower=True) tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) AA = 1. 
/ np.sqrt(tmp) i += 1 least_squares *= AA if Gram is None: # equiangular direction of variables in the active set eq_dir = np.dot(X.T[:n_active].T, least_squares) # correlation between each unactive variables and # eqiangular vector corr_eq_dir = np.dot(X.T[n_active:], eq_dir) else: # if huge number of features, this takes 50% of time, I # think could be avoided if we just update it using an # orthogonal (QR) decomposition of X corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny)) g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny)) gamma_ = min(g1, g2, C / AA) # TODO: better names for these variables: z drop = False z = -coef[active] / (least_squares + tiny32) z_pos = arrayfuncs.min_pos(z) if z_pos < gamma_: # some coefficients have changed sign idx = np.where(z == z_pos)[0][::-1] # update the sign, important for LAR sign_active[idx] = -sign_active[idx] if method == 'lasso': gamma_ = z_pos drop = True n_iter += 1 if return_path: if n_iter >= coefs.shape[0]: del coef, alpha, prev_alpha, prev_coef # resize the coefs and alphas array add_features = 2 * max(1, (max_features - n_active)) coefs = np.resize(coefs, (n_iter + add_features, n_features)) alphas = np.resize(alphas, n_iter + add_features) coef = coefs[n_iter] prev_coef = coefs[n_iter - 1] alpha = alphas[n_iter, np.newaxis] prev_alpha = alphas[n_iter - 1, np.newaxis] else: # mimic the effect of incrementing n_iter on the array references prev_coef = coef prev_alpha[0] = alpha[0] coef = np.zeros_like(coef) coef[active] = prev_coef[active] + gamma_ * least_squares # update correlations Cov -= gamma_ * corr_eq_dir # See if any coefficient has changed sign if drop and method == 'lasso': # handle the case when idx is not length of 1 [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in idx] n_active -= 1 m, n = idx, n_active # handle the case when idx is not length of 1 drop_idx = [active.pop(ii) for ii in idx] if Gram is None: # propagate dropped variable for ii in idx: for i in range(ii, n_active): X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) # yeah this is stupid indices[i], indices[i + 1] = indices[i + 1], indices[i] # TODO: this could be updated residual = y - np.dot(X[:, :n_active], coef[active]) temp = np.dot(X.T[n_active], residual) Cov = np.r_[temp, Cov] else: for ii in idx: for i in range(ii, n_active): indices[i], indices[i + 1] = indices[i + 1], indices[i] Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) # Cov_n = Cov_j + x_j * X + increment(betas) TODO: # will this still work with multiple drops ? # recompute covariance. Probably could be done better # wrong as Xy is not swapped with the rest of variables # TODO: this could be updated residual = y - np.dot(X, coef) temp = np.dot(X.T[drop_idx], residual) Cov = np.r_[temp, Cov] sign_active = np.delete(sign_active, idx) sign_active = np.append(sign_active, 0.) 
# just to maintain size if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx, n_active, abs(temp))) if return_path: # resize coefs in case of early stop alphas = alphas[:n_iter + 1] coefs = coefs[:n_iter + 1] if return_n_iter: return alphas, active, coefs.T, n_iter else: return alphas, active, coefs.T else: if return_n_iter: return alpha, active, coef, n_iter else: return alpha, active, coef ############################################################################### # Estimator classes class Lars(LinearModel, RegressorMixin): """Least Angle Regression model a.k.a. LAR Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- n_nonzero_coefs : int, optional Target number of non-zero coefficients. Use ``np.inf`` for no limit. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. fit_path : boolean If True the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \ whichever is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. coef_path_ : array, shape (n_features, n_alphas + 1) \ | list of n_targets such arrays The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). intercept_ : float | array, shape (n_targets,) Independent term in decision function. n_iter_ : array-like or int The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.Lars(n_nonzero_coefs=1) >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True, n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] 
See also -------- lars_path, LarsCV sklearn.decomposition.sparse_encode """ def __init__(self, fit_intercept=True, verbose=False, normalize=True, precompute='auto', n_nonzero_coefs=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True): self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.method = 'lar' self.precompute = precompute self.n_nonzero_coefs = n_nonzero_coefs self.eps = eps self.copy_X = copy_X self.fit_path = fit_path def _get_gram(self): # precompute if n_samples > n_features precompute = self.precompute if hasattr(precompute, '__array__'): Gram = precompute elif precompute == 'auto': Gram = 'auto' else: Gram = None return Gram def fit(self, X, y, Xy=None): """Fit the model using X, y as training data. parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \ optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True, multi_output=True) n_features = X.shape[1] X, y, X_mean, y_mean, X_std = self._center_data(X, y, self.fit_intercept, self.normalize, self.copy_X) if y.ndim == 1: y = y[:, np.newaxis] n_targets = y.shape[1] alpha = getattr(self, 'alpha', 0.) if hasattr(self, 'n_nonzero_coefs'): alpha = 0. # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter precompute = self.precompute if not hasattr(precompute, '__array__') and ( precompute is True or (precompute == 'auto' and X.shape[0] > X.shape[1]) or (precompute == 'auto' and y.shape[1] > 1)): Gram = np.dot(X.T, X) else: Gram = self._get_gram() self.alphas_ = [] self.n_iter_ = [] if self.fit_path: self.coef_ = [] self.active_ = [] self.coef_path_ = [] for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, active, coef_path, n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=True, return_n_iter=True) self.alphas_.append(alphas) self.active_.append(active) self.n_iter_.append(n_iter_) self.coef_path_.append(coef_path) self.coef_.append(coef_path[:, -1]) if n_targets == 1: self.alphas_, self.active_, self.coef_path_, self.coef_ = [ a[0] for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)] self.n_iter_ = self.n_iter_[0] else: self.coef_ = np.empty((n_targets, n_features)) for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, _, self.coef_[k], n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=False, return_n_iter=True) self.alphas_.append(alphas) self.n_iter_.append(n_iter_) if n_targets == 1: self.alphas_ = self.alphas_[0] self.n_iter_ = self.n_iter_[0] self._set_intercept(X_mean, y_mean, X_std) return self class LassoLars(Lars): """Lasso model fit with Least Angle Regression a.k.a. Lars It is a Linear Model trained with an L1 prior as regularizer. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. 
Parameters ---------- alpha : float Constant that multiplies the penalty term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by :class:`LinearRegression`. For numerical reasons, using ``alpha = 0`` with the LassoLars object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. fit_path : boolean If ``True`` the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \ nodes in the path with correlation greater than ``alpha``, whichever \ is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. coef_path_ : array, shape (n_features, n_alphas + 1) or list If a list is passed it's expected to be one of n_targets such arrays. The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). intercept_ : float | array, shape (n_targets,) Independent term in decision function. n_iter_ : array-like or int. The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.LassoLars(alpha=0.01) >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True, fit_path=True, max_iter=500, normalize=True, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -0.963257...] 
See also -------- lars_path lasso_path Lasso LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ def __init__(self, alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True): self.alpha = alpha self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.method = 'lasso' self.precompute = precompute self.copy_X = copy_X self.eps = eps self.fit_path = fit_path ############################################################################### # Cross-validated estimator classes def _check_copy_and_writeable(array, copy=False): if copy or not array.flags.writeable: return array.copy() return array def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None, copy=True, method='lars', verbose=False, fit_intercept=True, normalize=True, max_iter=500, eps=np.finfo(np.float).eps): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied; if False, they may be overwritten. method : 'lar' | 'lasso' Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. verbose : integer, optional Sets the amount of verbosity fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Returns -------- alphas : array, shape (n_alphas,) Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever is smaller. active : list Indices of active variables at the end of the path. 
coefs : array, shape (n_features, n_alphas) Coefficients along the path residues : array, shape (n_alphas, n_samples) Residues of the prediction on the test data """ X_train = _check_copy_and_writeable(X_train, copy) y_train = _check_copy_and_writeable(y_train, copy) X_test = _check_copy_and_writeable(X_test, copy) y_test = _check_copy_and_writeable(y_test, copy) if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] alphas, active, coefs = lars_path( X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False, method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps) if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] return alphas, active, coefs, residues.T class LarsCV(Lars): """Cross-validated Least Angle Regression model Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter: integer, optional Maximum number of iterations to perform. cv : cross-validation generator, optional see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to a 5-fold strategy max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds cv_mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. 
See also -------- lars_path, LassoLars, LassoLarsCV """ method = 'lar' def __init__(self, fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps, copy_X=True): self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.precompute = precompute self.copy_X = copy_X self.cv = cv self.max_n_alphas = max_n_alphas self.n_jobs = n_jobs self.eps = eps def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. Returns ------- self : object returns an instance of self. """ self.fit_path = True X, y = check_X_y(X, y, y_numeric=True) # init cross-validation generator cv = check_cv(self.cv, X, y, classifier=False) Gram = 'auto' if self.precompute else None cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_lars_path_residues)( X[train], y[train], X[test], y[test], Gram=Gram, copy=False, method=self.method, verbose=max(0, self.verbose - 1), normalize=self.normalize, fit_intercept=self.fit_intercept, max_iter=self.max_iter, eps=self.eps) for train, test in cv) all_alphas = np.concatenate(list(zip(*cv_paths))[0]) # Unique also sorts all_alphas = np.unique(all_alphas) # Take at most max_n_alphas values stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) all_alphas = all_alphas[::stride] mse_path = np.empty((len(all_alphas), len(cv_paths))) for index, (alphas, active, coefs, residues) in enumerate(cv_paths): alphas = alphas[::-1] residues = residues[::-1] if alphas[0] != 0: alphas = np.r_[0, alphas] residues = np.r_[residues[0, np.newaxis], residues] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] residues = np.r_[residues, residues[-1, np.newaxis]] this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) this_residues **= 2 mse_path[:, index] = np.mean(this_residues, axis=-1) mask = np.all(np.isfinite(mse_path), axis=-1) all_alphas = all_alphas[mask] mse_path = mse_path[mask] # Select the alpha that minimizes left-out error i_best_alpha = np.argmin(mse_path.mean(axis=-1)) best_alpha = all_alphas[i_best_alpha] # Store our parameters self.alpha_ = best_alpha self.cv_alphas_ = all_alphas self.cv_mse_path_ = mse_path # Now compute the full model # it will call a lasso internally when self if LassoLarsCV # as self.method == 'lasso' Lars.fit(self, X, y) return self @property def alpha(self): # impedance matching for the above Lars.fit (should not be documented) return self.alpha_ class LassoLarsCV(LarsCV): """Cross-validated Lasso, using the LARS algorithm The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. 
cv : cross-validation generator, optional see sklearn.cross_validation module. If None is passed, default to a 5-fold strategy max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds cv_mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. Notes ----- The object solves the same problem as the LassoCV object. However, unlike the LassoCV, it find the relevant alphas values by itself. In general, because of this property, it will be more stable. However, it is more fragile to heavily multicollinear datasets. It is more efficient than the LassoCV if only a small number of features are selected compared to the total number, for instance if there are very few samples compared to the number of features. See also -------- lars_path, LassoLars, LarsCV, LassoCV """ method = 'lasso' class LassoLarsIC(LassoLars): """Lasso model fit with Lars using BIC or AIC for model selection The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- criterion : 'bic' | 'aic' The type of criterion to use. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. Can be used for early stopping. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. 
Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. alpha_ : float the alpha parameter chosen by the information criterion n_iter_ : int number of iterations run by lars_path to find the grid of alphas. criterion_ : array, shape (n_alphas,) The value of the information criteria ('aic', 'bic') across all alphas. The alpha which has the smallest information criteria is chosen. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.LassoLarsIC(criterion='bic') >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True, max_iter=500, normalize=True, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] Notes ----- The estimation of the number of degrees of freedom is given by: "On the degrees of freedom of the lasso" Hui Zou, Trevor Hastie, and Robert Tibshirani Ann. Statist. Volume 35, Number 5 (2007), 2173-2192. http://en.wikipedia.org/wiki/Akaike_information_criterion http://en.wikipedia.org/wiki/Bayesian_information_criterion See also -------- lars_path, LassoLars, LassoLarsCV """ def __init__(self, criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True): self.criterion = criterion self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.copy_X = copy_X self.precompute = precompute self.eps = eps def fit(self, X, y, copy_X=True): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) training data. y : array-like, shape (n_samples,) target values. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Returns ------- self : object returns an instance of self. """ self.fit_path = True X, y = check_X_y(X, y, multi_output=True, y_numeric=True) X, y, Xmean, ymean, Xstd = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X) max_iter = self.max_iter Gram = self._get_gram() alphas_, active_, coef_path_, self.n_iter_ = lars_path( X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method='lasso', verbose=self.verbose, max_iter=max_iter, eps=self.eps, return_n_iter=True) n_samples = X.shape[0] if self.criterion == 'aic': K = 2 # AIC elif self.criterion == 'bic': K = log(n_samples) # BIC else: raise ValueError('criterion should be either bic or aic') R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals mean_squared_error = np.mean(R ** 2, axis=0) df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom for k, coef in enumerate(coef_path_.T): mask = np.abs(coef) > np.finfo(coef.dtype).eps if not np.any(mask): continue # get the number of degrees of freedom equal to: # Xc = X[:, mask] # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs df[k] = np.sum(mask) self.alphas_ = alphas_ with np.errstate(divide='ignore'): self.criterion_ = n_samples * np.log(mean_squared_error) + K * df n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean, Xstd) return self
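# Editor's note: a minimal sketch, not part of the original module, showing how the
# information criterion described above is evaluated along a Lasso path. It assumes
# `X`, `y` and a `coefs` matrix of shape (n_features, n_alphas) such as the one
# returned by lars_path; K = 2 gives AIC and K = log(n_samples) gives BIC, and the
# degrees of freedom are counted as the non-zero coefficients (Zou et al., 2007).
import numpy as np

def information_criterion(X, y, coefs, criterion='aic'):
    n_samples = X.shape[0]
    K = 2.0 if criterion == 'aic' else np.log(n_samples)
    residuals = y[:, np.newaxis] - np.dot(X, coefs)   # shape (n_samples, n_alphas)
    mse = np.mean(residuals ** 2, axis=0)
    df = np.sum(np.abs(coefs) > np.finfo(coefs.dtype).eps, axis=0)
    with np.errstate(divide='ignore'):
        return n_samples * np.log(mse) + K * df       # choose alpha at np.argmin of this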
bsd-3-clause
PatrickOReilly/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
77
17611
import numpy as np import scipy.sparse as sp from scipy import linalg from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) from numpy.random import RandomState from sklearn.datasets import make_classification from sklearn.utils.sparsefuncs import (mean_variance_axis, incr_mean_variance_axis, inplace_column_scale, inplace_row_scale, inplace_swap_row, inplace_swap_column, min_max_axis, count_nonzero, csc_median_axis_0) from sklearn.utils.sparsefuncs_fast import (assign_rows_csr, inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from sklearn.utils.testing import assert_raises def test_mean_variance_axis0(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 assert_raises(TypeError, mean_variance_axis, X_lil, axis=0) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: X_test = X.astype(input_dtype) for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis=0) assert_equal(X_means.dtype, output_dtype) assert_equal(X_vars.dtype, output_dtype) assert_array_almost_equal(X_means, np.mean(X_test, axis=0)) assert_array_almost_equal(X_vars, np.var(X_test, axis=0)) def test_mean_variance_axis1(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 assert_raises(TypeError, mean_variance_axis, X_lil, axis=1) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: X_test = X.astype(input_dtype) for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis=0) assert_equal(X_means.dtype, output_dtype) assert_equal(X_vars.dtype, output_dtype) assert_array_almost_equal(X_means, np.mean(X_test, axis=0)) assert_array_almost_equal(X_vars, np.var(X_test, axis=0)) def test_incr_mean_variance_axis(): for axis in [0, 1]: rng = np.random.RandomState(0) n_features = 50 n_samples = 10 data_chunks = [rng.randint(0, 2, size=n_features) for i in range(n_samples)] # default params for incr_mean_variance last_mean = np.zeros(n_features) last_var = np.zeros_like(last_mean) last_n = 0 # Test errors X = np.array(data_chunks[0]) X = np.atleast_2d(X) X_lil = sp.lil_matrix(X) X_csr = sp.csr_matrix(X_lil) assert_raises(TypeError, incr_mean_variance_axis, axis, last_mean, last_var, last_n) assert_raises(TypeError, incr_mean_variance_axis, axis, last_mean, last_var, last_n) assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis, last_mean, last_var, last_n) # Test _incr_mean_and_var with a 1 row input X_means, X_vars = mean_variance_axis(X_csr, axis) X_means_incr, X_vars_incr, n_incr = \ incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n) assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, X_vars_incr) assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis) assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, 
X_vars_incr) assert_equal(X.shape[axis], n_incr) # Test _incremental_mean_and_var with whole data X = np.vstack(data_chunks) X_lil = sp.lil_matrix(X) X_csr = sp.csr_matrix(X_lil) X_csc = sp.csc_matrix(X_lil) expected_dtypes = [(np.float32, np.float32), (np.float64, np.float64), (np.int32, np.float64), (np.int64, np.float64)] for input_dtype, output_dtype in expected_dtypes: for X_sparse in (X_csr, X_csc): X_sparse = X_sparse.astype(input_dtype) X_means, X_vars = mean_variance_axis(X_sparse, axis) X_means_incr, X_vars_incr, n_incr = \ incr_mean_variance_axis(X_sparse, axis, last_mean, last_var, last_n) assert_equal(X_means_incr.dtype, output_dtype) assert_equal(X_vars_incr.dtype, output_dtype) assert_array_almost_equal(X_means, X_means_incr) assert_array_almost_equal(X_vars, X_vars_incr) assert_equal(X.shape[axis], n_incr) def test_mean_variance_illegal_axis(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_csr = sp.csr_matrix(X) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3) assert_raises(ValueError, mean_variance_axis, X_csr, axis=2) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1) assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3, last_mean=None, last_var=None, last_n=None) assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2, last_mean=None, last_var=None, last_n=None) assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1, last_mean=None, last_var=None, last_n=None) def test_densify_rows(): for dtype in (np.float32, np.float64): X = sp.csr_matrix([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype) X_rows = np.array([0, 2, 3], dtype=np.intp) out = np.ones((6, X.shape[1]), dtype=dtype) out_rows = np.array([1, 3, 4], dtype=np.intp) expect = np.ones_like(out) expect[out_rows] = X[X_rows, :].toarray() assign_rows_csr(X, X_rows, out_rows, out) assert_array_equal(out, expect) def test_inplace_column_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(200) XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_row_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(100) XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) 
assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_swap_row(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) def test_inplace_swap_column(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) def test_min_max_axis0(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc 
= sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) def test_min_max_axis1(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) def test_min_max_axis_errors(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0) assert_raises(ValueError, min_max_axis, X_csr, axis=2) assert_raises(ValueError, min_max_axis, X_csc, axis=-3) def test_count_nonzero(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) X_nonzero = X != 0 sample_weight = [.5, .2, .3, .1, .1] X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None] for axis in [0, 1, -1, -2, None]: assert_array_almost_equal(count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)) assert_array_almost_equal(count_nonzero(X_csr, axis=axis, sample_weight=sample_weight), X_nonzero_weighted.sum(axis=axis)) assert_raises(TypeError, count_nonzero, X_csc) assert_raises(ValueError, count_nonzero, X_csr, axis=2) def test_csc_row_median(): # Test csc_row_median actually calculates the median. # Test that it gives the same output when X is dense. rng = np.random.RandomState(0) X = rng.rand(100, 50) dense_median = np.median(X, axis=0) csc = sp.csc_matrix(X) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test that it gives the same output when X is sparse X = rng.rand(51, 100) X[X < 0.7] = 0.0 ind = rng.randint(0, 50, 10) X[ind] = -X[ind] csc = sp.csc_matrix(X) dense_median = np.median(X, axis=0) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test for toy data. X = [[0, -2], [-1, -1], [1, 0], [2, 1]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5])) X = [[0, -2], [-1, -5], [1, -3]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0., -3])) # Test that it raises an Error for non-csc matrices. assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X)) def test_inplace_normalize(): ones = np.ones((10, 1)) rs = RandomState(10) for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2): for dtype in (np.float64, np.float32): X = rs.randn(10, 5).astype(dtype) X_csr = sp.csr_matrix(X) inplace_csr_row_normalize(X_csr) assert_equal(X_csr.dtype, dtype) if inplace_csr_row_normalize is inplace_csr_row_normalize_l2: X_csr.data **= 2 assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
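# Editor's note: a brief illustrative sketch, not part of the original test file, of
# the two sparsefuncs helpers the tests above exercise most; the toy matrix and the
# scale vector are arbitrary assumptions chosen for illustration.
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis, inplace_column_scale

X = sp.csr_matrix(np.array([[0., 3., 0.], [2., 4., 0.], [9., 8., 7.]]))
means, variances = mean_variance_axis(X, axis=0)    # per-column mean and variance
inplace_column_scale(X, np.array([1., 0.5, 2.]))    # scale columns without densifying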
bsd-3-clause
nickcdryan/rep
tests/test_grid.py
4
3211
from __future__ import division, print_function, absolute_import from collections import OrderedDict from sklearn import clone from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import roc_auc_score from rep.metaml import GridOptimalSearchCV, SubgridParameterOptimizer, FoldingScorer, \ RegressionParameterOptimizer from rep.test.test_estimators import generate_classification_data, check_grid, run_grid from rep.estimators import SklearnClassifier __author__ = 'Tatiana Likhomanenko' def grid_tmva(score_function): grid_param = OrderedDict({"MaxDepth": [4, 5], "NTrees": [10, 20]}) generator = SubgridParameterOptimizer(grid_param) scorer = FoldingScorer(score_function) from rep.estimators import TMVAClassifier grid = GridOptimalSearchCV(TMVAClassifier(features=['column0', 'column1']), generator, scorer) cl = check_grid(grid, False, False, False) assert 1 <= len(cl.features) <= 3 params = cl.get_params() for key in grid_param: assert params[key] == grid.generator.best_params_[key] def grid_sklearn(score_function): grid_param = OrderedDict({"n_estimators": [10, 20], "learning_rate": [0.1, 0.05], 'features': [['column0', 'column1'], ['column0', 'column1', 'column2']]}) generator = RegressionParameterOptimizer(grid_param) scorer = FoldingScorer(score_function) grid = GridOptimalSearchCV(SklearnClassifier(clf=AdaBoostClassifier()), generator, scorer) cl = check_grid(grid, False, False, False) assert 1 <= len(cl.features) <= 3 params = cl.get_params() for key in grid_param: if key in params: assert params[key] == grid.generator.best_params_[key] else: assert params['clf__' + key] == grid.generator.best_params_[key] def grid_custom(custom): grid_param = OrderedDict({"n_estimators": [10, 20], "learning_rate": [0.1, 0.05], 'features': [['column0', 'column1'], ['column0', 'column1', 'column2']]}) generator = SubgridParameterOptimizer(grid_param) grid = GridOptimalSearchCV(SklearnClassifier(clf=AdaBoostClassifier(), features=['column0', 'column1']), generator, custom) cl = check_grid(grid, False, False, False) assert 1 <= len(cl.features) <= 3 params = cl.get_params() for key in grid_param: if key in params: assert params[key] == grid.generator.best_params_[key] else: assert params['clf__' + key] == grid.generator.best_params_[key] def test_grid(): def generate_scorer(test, labels): def custom(base_estimator, params, X, y, sample_weight=None): cl = clone(base_estimator) cl.set_params(**params) if sample_weight is not None: cl.fit(X, y, sample_weight) else: cl.fit(X, y) return roc_auc_score(labels, cl.predict_proba(test)[:, 1]) return custom X, y, _ = generate_classification_data() grid_custom(generate_scorer(X, y)) run_grid(grid_sklearn) run_grid(grid_tmva)
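# Editor's note: an illustrative sketch, not part of the original tests, of the custom
# scorer contract that grid_custom above relies on -- a callable taking
# (base_estimator, params, X, y, sample_weight=None) and returning a score. The
# hold-out data and the ROC AUC metric are assumptions mirroring generate_scorer.
from sklearn import clone
from sklearn.metrics import roc_auc_score

def make_holdout_scorer(X_holdout, y_holdout):
    def scorer(base_estimator, params, X, y, sample_weight=None):
        estimator = clone(base_estimator).set_params(**params)
        estimator.fit(X, y)
        return roc_auc_score(y_holdout, estimator.predict_proba(X_holdout)[:, 1])
    return scorer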
apache-2.0
ray-project/ray
python/ray/tune/examples/pbt_convnet_example.py
1
4558
#!/usr/bin/env python # flake8: noqa # fmt: off # __tutorial_imports_begin__ import argparse import os import numpy as np import torch import torch.optim as optim from torchvision import datasets from ray.tune.examples.mnist_pytorch import train, test, ConvNet,\ get_data_loaders import ray from ray import air, tune from ray.tune.schedulers import PopulationBasedTraining from ray.tune.utils import validate_save_restore # __tutorial_imports_end__ # __trainable_begin__ class PytorchTrainable(tune.Trainable): """Train a Pytorch ConvNet with Trainable and PopulationBasedTraining scheduler. The example reuse some of the functions in mnist_pytorch, and is a good demo for how to add the tuning function without changing the original training code. """ def setup(self, config): self.train_loader, self.test_loader = get_data_loaders() self.model = ConvNet() self.optimizer = optim.SGD( self.model.parameters(), lr=config.get("lr", 0.01), momentum=config.get("momentum", 0.9)) def step(self): train(self.model, self.optimizer, self.train_loader) acc = test(self.model, self.test_loader) return {"mean_accuracy": acc} def save_checkpoint(self, checkpoint_dir): checkpoint_path = os.path.join(checkpoint_dir, "model.pth") torch.save(self.model.state_dict(), checkpoint_path) return checkpoint_path def load_checkpoint(self, checkpoint_path): self.model.load_state_dict(torch.load(checkpoint_path)) def reset_config(self, new_config): for param_group in self.optimizer.param_groups: if "lr" in new_config: param_group["lr"] = new_config["lr"] if "momentum" in new_config: param_group["momentum"] = new_config["momentum"] self.config = new_config return True # __trainable_end__ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() ray.init(num_cpus=2) datasets.MNIST("~/data", train=True, download=True) # check if PytorchTrainble will save/restore correctly before execution validate_save_restore(PytorchTrainable) validate_save_restore(PytorchTrainable, use_object_store=True) # __pbt_begin__ scheduler = PopulationBasedTraining( time_attr="training_iteration", perturbation_interval=5, hyperparam_mutations={ # distribution for resampling "lr": lambda: np.random.uniform(0.0001, 1), # allow perturbations within this set of categorical values "momentum": [0.8, 0.9, 0.99], }) # __pbt_end__ # __tune_begin__ class CustomStopper(tune.Stopper): def __init__(self): self.should_stop = False def __call__(self, trial_id, result): max_iter = 5 if args.smoke_test else 100 if not self.should_stop and result["mean_accuracy"] > 0.96: self.should_stop = True return self.should_stop or result["training_iteration"] >= max_iter def stop_all(self): return self.should_stop stopper = CustomStopper() tuner = tune.Tuner( PytorchTrainable, run_config=air.RunConfig( name="pbt_test", stop=stopper, verbose=1, checkpoint_config=air.CheckpointConfig( checkpoint_score_attribute="mean_accuracy", checkpoint_frequency=5, num_to_keep=4, ), ), tune_config=tune.TuneConfig( scheduler=scheduler, metric="mean_accuracy", mode="max", num_samples=4, reuse_actors=True, ), param_space={ "lr": tune.uniform(0.001, 1), "momentum": tune.uniform(0.001, 1), }, ) results = tuner.fit() # __tune_end__ best_result = results.get_best_result() best_checkpoint = best_result.checkpoint restored_trainable = PytorchTrainable() restored_trainable.restore(best_checkpoint) best_model = restored_trainable.model # Note that test only runs on a small random 
set of the test data, thus the # accuracy may be different from metrics shown in tuning process. test_acc = test(best_model, get_data_loaders()[1]) print("best model accuracy: ", test_acc)
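# Editor's note: a conceptual sketch only, not Ray Tune code, of the exploit/explore
# step that PopulationBasedTraining performs for the trainable above. The weight copy,
# the handling of `hyperparam_mutations` entries and the trial dictionaries are
# simplifying assumptions made for illustration.
import random

def exploit_and_explore(weak_trial, strong_trial, mutations):
    weak_trial["state"] = dict(strong_trial["state"])        # exploit: clone the better state
    for key, spec in mutations.items():                      # explore: resample or pick anew
        if callable(spec):
            weak_trial["config"][key] = spec()               # e.g. lambda: np.random.uniform(...)
        else:
            weak_trial["config"][key] = random.choice(spec)  # e.g. [0.8, 0.9, 0.99]
    return weak_trial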
apache-2.0
PatrickOReilly/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
80
4768
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split # Generate data (adapted from G. Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. 
""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_splits=3): cv = KFold(n_splits=n_splits) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv.split(X_train, y_train): cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_splits return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
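# Editor's note: a compact sketch, not part of the original example above, of the
# heuristic that plot illustrates -- picking the number of boosting iterations from
# the cumulative sum of the out-of-bag improvements. It assumes a fitted stochastic
# GradientBoostingClassifier (subsample < 1.0) such as `clf` in the example.
import numpy as np

def oob_best_n_estimators(clf):
    cumsum = -np.cumsum(clf.oob_improvement_)   # negative cumulative OOB improvement
    return int(np.argmin(cumsum)) + 1           # iteration with the lowest estimated loss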
ghliu/pytorch-ddpg
util.py
1
2518
import os import torch from torch.autograd import Variable USE_CUDA = torch.cuda.is_available() FLOAT = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor def prRed(prt): print("\033[91m {}\033[00m" .format(prt)) def prGreen(prt): print("\033[92m {}\033[00m" .format(prt)) def prYellow(prt): print("\033[93m {}\033[00m" .format(prt)) def prLightPurple(prt): print("\033[94m {}\033[00m" .format(prt)) def prPurple(prt): print("\033[95m {}\033[00m" .format(prt)) def prCyan(prt): print("\033[96m {}\033[00m" .format(prt)) def prLightGray(prt): print("\033[97m {}\033[00m" .format(prt)) def prBlack(prt): print("\033[98m {}\033[00m" .format(prt)) def to_numpy(var): return var.cpu().data.numpy() if USE_CUDA else var.data.numpy() def to_tensor(ndarray, volatile=False, requires_grad=False, dtype=FLOAT): return Variable( torch.from_numpy(ndarray), volatile=volatile, requires_grad=requires_grad ).type(dtype) def soft_update(target, source, tau): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_( target_param.data * (1.0 - tau) + param.data * tau ) def hard_update(target, source): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_(param.data) def get_output_folder(parent_dir, env_name): """Return save folder. Assumes folders in the parent_dir have suffix -run{run number}. Finds the highest run number and sets the output folder to that number + 1. This is just convenient so that if you run the same script multiple times tensorboard can plot all of the results on the same plots with different names. Parameters ---------- parent_dir: str Path of the directory containing all experiment runs. Returns ------- parent_dir/run_dir Path to this run's save directory. """ os.makedirs(parent_dir, exist_ok=True) experiment_id = 0 for folder_name in os.listdir(parent_dir): if not os.path.isdir(os.path.join(parent_dir, folder_name)): continue try: folder_name = int(folder_name.split('-run')[-1]) if folder_name > experiment_id: experiment_id = folder_name except: pass experiment_id += 1 parent_dir = os.path.join(parent_dir, env_name) parent_dir = parent_dir + '-run{}'.format(experiment_id) os.makedirs(parent_dir, exist_ok=True) return parent_dir
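# Editor's note: a short usage sketch, not part of the original utilities, showing how
# the Polyak-averaging helpers defined above are typically applied to a target/online
# network pair; the layer sizes and the tau value are arbitrary assumptions.
import torch.nn as nn

critic = nn.Linear(4, 1)
target_critic = nn.Linear(4, 1)

hard_update(target_critic, critic)             # start from identical weights
# ... then, after each learning step:
soft_update(target_critic, critic, tau=0.001)  # target <- (1 - tau) * target + tau * source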
apache-2.0
drawks/ansible
lib/ansible/modules/cloud/google/gcp_bigquery_table_facts.py
2
18770
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_bigquery_table_facts description: - Gather facts for GCP Table short_description: Gather facts for GCP Table version_added: 2.8 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: dataset: description: - Name of the dataset. required: false extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: " a table facts" gcp_bigquery_table_facts: dataset: example_dataset project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" state: facts ''' RETURN = ''' resources: description: List of resources returned: always type: complex contains: tableReference: description: - Reference describing the ID of this table. returned: success type: complex contains: datasetId: description: - The ID of the dataset containing this table. returned: success type: str projectId: description: - The ID of the project containing this table. returned: success type: str tableId: description: - The ID of the the table. returned: success type: str creationTime: description: - The time when this dataset was created, in milliseconds since the epoch. returned: success type: int description: description: - A user-friendly description of the dataset. returned: success type: str friendlyName: description: - A descriptive name for this table. returned: success type: str id: description: - An opaque ID uniquely identifying the table. returned: success type: str labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . returned: success type: dict lastModifiedTime: description: - The time when this table was last modified, in milliseconds since the epoch. returned: success type: int location: description: - The geographic location where the table resides. This value is inherited from the dataset. returned: success type: str name: description: - Name of the table. returned: success type: str numBytes: description: - The size of this table in bytes, excluding any data in the streaming buffer. returned: success type: int numLongTermBytes: description: - The number of bytes in the table that are considered "long-term storage". returned: success type: int numRows: description: - The number of rows of data in this table, excluding any data in the streaming buffer. returned: success type: int type: description: - Describes the table type. 
returned: success type: str view: description: - The view definition. returned: success type: complex contains: useLegacySql: description: - Specifies whether to use BigQuery's legacy SQL for this view . returned: success type: bool userDefinedFunctionResources: description: - Describes user-defined function resources used in the query. returned: success type: complex contains: inlineCode: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. returned: success type: str resourceUri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). returned: success type: str timePartitioning: description: - If specified, configures time-based partitioning for this table. returned: success type: complex contains: expirationMs: description: - Number of milliseconds for which to keep the storage for a partition. returned: success type: int type: description: - The only type supported is DAY, which will generate one partition per day. returned: success type: str streamingBuffer: description: - Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer. returned: success type: complex contains: estimatedBytes: description: - A lower-bound estimate of the number of bytes currently in the streaming buffer. returned: success type: int estimatedRows: description: - A lower-bound estimate of the number of rows currently in the streaming buffer. returned: success type: int oldestEntryTime: description: - Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. returned: success type: int schema: description: - Describes the schema of this table. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. The maximum length is 1,024 characters. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD. returned: success type: list mode: description: - The field mode. returned: success type: str name: description: - The field name. returned: success type: str type: description: - The field data type. returned: success type: str encryptionConfiguration: description: - Custom encryption configuration. returned: success type: complex contains: kmsKeyName: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. returned: success type: str expirationTime: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. returned: success type: int externalDataConfiguration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. returned: success type: complex contains: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. 
returned: success type: bool compression: description: - The compression type of the data source. returned: success type: str ignoreUnknownValues: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . returned: success type: bool maxBadRecords: description: - The maximum number of bad records that BigQuery can ignore when reading data . returned: success type: int sourceFormat: description: - The data format. returned: success type: str sourceUris: description: - 'The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard character is not allowed.' returned: success type: list schema: description: - The schema for the data. Schema is required for CSV and JSON formats. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD . returned: success type: list mode: description: - Field mode. returned: success type: str name: description: - Field name. returned: success type: str type: description: - Field data type. returned: success type: str googleSheetsOptions: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. returned: success type: complex contains: skipLeadingRows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. returned: success type: int csvOptions: description: - Additional properties to set if sourceFormat is set to CSV. returned: success type: complex contains: allowJaggedRows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . returned: success type: bool allowQuotedNewlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . returned: success type: bool encoding: description: - The character encoding of the data. returned: success type: str fieldDelimiter: description: - The separator for fields in a CSV file. returned: success type: str quote: description: - The value that is used to quote data sections in a CSV file. returned: success type: str skipLeadingRows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. returned: success type: int bigtableOptions: description: - Additional options if sourceFormat is set to BIGTABLE. returned: success type: complex contains: ignoreUnspecifiedColumnFamilies: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . returned: success type: bool readRowkeyAsString: description: - If field is true, then the rowkey column families will be read and converted to string. returned: success type: bool columnFamilies: description: - List of column families to expose in the table schema along with their types. 
returned: success type: complex contains: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. returned: success type: complex contains: encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str fieldName: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. returned: success type: str onlyReadLatest: description: - If this is set, only the latest version of value in this column are exposed . returned: success type: bool qualifierString: description: - Qualifier of the column. returned: success type: str type: description: - The type to convert the value in cells of this column. returned: success type: str encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str familyId: description: - Identifier of the column family. returned: success type: str onlyReadLatest: description: - If this is set only the latest version of value are exposed for all columns in this column family . returned: success type: bool type: description: - The type to convert the value in cells of this column family. returned: success type: str dataset: description: - Name of the dataset. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule(argument_spec=dict(dataset=dict(type='str'))) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] items = fetch_list(module, collection(module)) if items.get('tables'): items = items.get('tables') else: items = [] return_value = {'resources': items} module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params) def fetch_list(module, link): auth = GcpSession(module, 'bigquery') response = auth.get(link) return return_if_object(module, response) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
gpl-3.0
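Outside of Ansible, the list call that collection() and fetch_list() perform in the module above can be sketched with plain requests (already one of the module's stated requirements). This is only an illustration under assumptions: the list_bigquery_tables helper name and the token placeholder are made up here, and a real OAuth2 access token must be obtained however your environment provides one (the module itself does this through GcpSession and its service-account credentials).

# Minimal sketch (not part of the module): list BigQuery tables with a plain
# HTTP GET against the same endpoint that collection() builds above.
import requests


def list_bigquery_tables(project, dataset, access_token):
    # Same URL template that collection() formats in the module.
    url = ("https://www.googleapis.com/bigquery/v2/"
           "projects/{project}/datasets/{dataset}/tables").format(
               project=project, dataset=dataset)
    response = requests.get(
        url, headers={"Authorization": "Bearer %s" % access_token})
    # Mirror return_if_object(): 404 and 204 mean there is nothing to report.
    if response.status_code in (404, 204):
        return []
    response.raise_for_status()
    # fetch_list() returns the raw payload; main() unwraps the 'tables' key.
    return response.json().get("tables", [])


if __name__ == "__main__":
    # Placeholder token (assumption) -- replace with a real OAuth2 access
    # token; the module obtains one via its serviceaccount auth_kind.
    token = "REPLACE_WITH_OAUTH2_ACCESS_TOKEN"
    print(list_bigquery_tables("test_project", "example_dataset", token))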
aflaxman/scikit-learn
examples/gaussian_process/plot_gpr_prior_posterior.py
33
2900
""" ========================================================================== Illustration of prior and posterior Gaussian process for different kernels ========================================================================== This example illustrates the prior and posterior of a GPR with different kernels. Mean, standard deviation, and 10 samples are shown for both prior and posterior. """ print(__doc__) # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel) kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)), 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1), 1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0, length_scale_bounds=(0.1, 10.0), periodicity_bounds=(1.0, 10.0)), ConstantKernel(0.1, (0.01, 10.0)) * (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2), 1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0), nu=1.5)] for fig_index, kernel in enumerate(kernels): # Specify Gaussian Process gp = GaussianProcessRegressor(kernel=kernel) # Plot prior plt.figure(fig_index, figsize=(8, 8)) plt.subplot(2, 1, 1) X_ = np.linspace(0, 5, 100) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 10) plt.plot(X_, y_samples, lw=1) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("Prior (kernel: %s)" % kernel, fontsize=12) # Generate data and fit GP rng = np.random.RandomState(4) X = rng.uniform(0, 5, 10)[:, np.newaxis] y = np.sin((X[:, 0] - 2.5) ** 2) gp.fit(X, y) # Plot posterior plt.subplot(2, 1, 2) X_ = np.linspace(0, 5, 100) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 10) plt.plot(X_, y_samples, lw=1) plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0)) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f" % (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)), fontsize=12) plt.tight_layout() plt.show()
bsd-3-clause
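As a complement to the plotting example above, the short sketch below fits a GaussianProcessRegressor on the same toy target and prints the optimized kernel and its log-marginal likelihood, the two quantities the figure titles report. It uses only documented scikit-learn API; restricting it to a single RBF kernel is an illustrative assumption, not part of the original example.

# Minimal sketch: inspect the fitted kernel without any plotting.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# Same toy target as the example above.
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)

gp = GaussianProcessRegressor(kernel=1.0 * RBF(length_scale=1.0)).fit(X, y)
print("optimized kernel:", gp.kernel_)
print("log-marginal likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))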
aflaxman/scikit-learn
sklearn/tests/test_kernel_approximation.py
30
9164
import numpy as np from scipy.sparse import csr_matrix from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal, assert_raises from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_warns_message from sklearn.metrics.pairwise import kernel_metrics from sklearn.kernel_approximation import RBFSampler from sklearn.kernel_approximation import AdditiveChi2Sampler from sklearn.kernel_approximation import SkewedChi2Sampler from sklearn.kernel_approximation import Nystroem from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel, chi2_kernel # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] def test_additive_chi2_sampler(): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # abbreviations for easier formula X_ = X[:, np.newaxis, :] Y_ = Y[np.newaxis, :, :] large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = (large_kernel.sum(axis=2)) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_matrix(X)) Y_sp_trans = transform.transform(csr_matrix(Y)) assert_array_equal(X_trans, X_sp_trans.A) assert_array_equal(Y_trans, Y_sp_trans.A) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) # test error on invalid sample_steps transform = AdditiveChi2Sampler(sample_steps=4) assert_raises(ValueError, transform.fit, X) # test that the sample interval is set correctly sample_steps_available = [1, 2, 3] for sample_steps in sample_steps_available: # test that the sample_interval is initialized correctly transform = AdditiveChi2Sampler(sample_steps=sample_steps) assert_equal(transform.sample_interval, None) # test that the sample_interval is changed in the fit method transform.fit(X) assert_not_equal(transform.sample_interval_, None) # test that the sample_interval is set correctly sample_interval = 0.3 transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval) assert_equal(transform.sample_interval, sample_interval) transform.fit(X) assert_equal(transform.sample_interval_, sample_interval) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # set on negative component but greater than c to ensure that the kernel # approximation is valid on the group (-c; +\infty) endowed with the skewed # multiplication. Y[0, 0] = -c / 2. # abbreviations for easier formula X_c = (X + c)[:, np.newaxis, :] Y_c = (Y + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) 
- np.log(X_c + Y_c)) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) assert_true(np.isfinite(kernel).all(), 'NaNs found in the Gram matrix') assert_true(np.isfinite(kernel_approx).all(), 'NaNs found in the approximate Gram matrix') # test error is raised on when inputs contains values smaller than -c Y_neg = Y.copy() Y_neg[0, 0] = -c * 2. assert_raises(ValueError, transform.transform, Y_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10. kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased np.abs(error, out=error) assert_less_equal(np.max(error), 0.1) # nothing too far off assert_less_equal(np.mean(error), 0.05) # mean is fairly close def test_input_validation(): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_matrix(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test callable kernel def linear_kernel(X, Y): return np.dot(X, Y.T) trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) def test_nystroem_default_parameters(): rnd = np.random.RandomState(42) X = rnd.uniform(size=(10, 4)) # rbf kernel should behave as gamma=None by default # aka gamma = 1 / n_features nystroem = Nystroem(n_components=10) X_transformed = nystroem.fit_transform(X) K = rbf_kernel(X, gamma=None) K2 = np.dot(X_transformed, X_transformed.T) assert_array_almost_equal(K, K2) # chi2 kernel should behave as gamma=1 by default nystroem = Nystroem(kernel='chi2', n_components=10) X_transformed = nystroem.fit_transform(X) K = chi2_kernel(X, gamma=1) K2 = np.dot(X_transformed, X_transformed.T) assert_array_almost_equal(K, K2) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) 
X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert_true(np.all(np.isfinite(Y))) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters beside gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=.1) nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=.1) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable. rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem(kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={'log': kernel_log}).fit(X) assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2) def linear_kernel(X, Y): return np.dot(X, Y.T) # if degree, gamma or coef0 is passed, we raise a warning msg = "Passing gamma, coef0 or degree to Nystroem" params = ({'gamma': 1}, {'coef0': 1}, {'degree': 2}) for param in params: ny = Nystroem(kernel=linear_kernel, **param) assert_warns_message(DeprecationWarning, msg, ny.fit, X)
bsd-3-clause
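The test module above repeatedly checks one property: the explicit feature map produced by an approximation yields a Gram matrix close to the exact kernel. The standalone sketch below condenses what test_rbf_sampler verifies, with the same data shapes and RBFSampler settings; the printed error summaries stand in for the test's assert_less_equal bounds.

# Condensed, standalone version of the property test_rbf_sampler checks:
# the random feature map Z satisfies Z @ Z.T ~= rbf_kernel(X, Y).
import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))

gamma = 10.0
exact = rbf_kernel(X, Y, gamma=gamma)

sampler = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
approx = np.dot(sampler.fit_transform(X), sampler.transform(Y).T)

print("max abs error:", np.abs(exact - approx).max())
print("mean abs error:", np.abs(exact - approx).mean())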
aflaxman/scikit-learn
examples/applications/plot_topics_extraction_with_nmf_lda.py
34
4820
""" ======================================================================================= Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation ======================================================================================= This is an example of applying :class:`sklearn.decomposition.NMF` and :class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus of documents and extract additive models of the topic structure of the corpus. The output is a list of topics, each represented as a list of terms (weights are not shown). Non-negative Matrix Factorization is applied with two different objective functions: the Frobenius norm, and the generalized Kullback-Leibler divergence. The latter is equivalent to Probabilistic Latent Semantic Indexing. The default parameters (n_samples / n_features / n_components) should make the example runnable in a couple of tens of seconds. You can try to increase the dimensions of the problem, but be aware that the time complexity is polynomial in NMF. In LDA, the time complexity is proportional to (n_samples * iterations). """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Lars Buitinck # Chyi-Kwei Yau <chyikwei.yau@gmail.com> # License: BSD 3 clause from __future__ import print_function from time import time from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.decomposition import NMF, LatentDirichletAllocation from sklearn.datasets import fetch_20newsgroups n_samples = 2000 n_features = 1000 n_components = 10 n_top_words = 20 def print_top_words(model, feature_names, n_top_words): for topic_idx, topic in enumerate(model.components_): message = "Topic #%d: " % topic_idx message += " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) print(message) print() # Load the 20 newsgroups dataset and vectorize it. We use a few heuristics # to filter out useless terms early on: the posts are stripped of headers, # footers and quoted replies, and common English words, words occurring in # only one document or in at least 95% of the documents are removed. print("Loading dataset...") t0 = time() dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes')) data_samples = dataset.data[:n_samples] print("done in %0.3fs." % (time() - t0)) # Use tf-idf features for NMF. print("Extracting tf-idf features for NMF...") tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features, stop_words='english') t0 = time() tfidf = tfidf_vectorizer.fit_transform(data_samples) print("done in %0.3fs." % (time() - t0)) # Use tf (raw term count) features for LDA. print("Extracting tf features for LDA...") tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features, stop_words='english') t0 = time() tf = tf_vectorizer.fit_transform(data_samples) print("done in %0.3fs." % (time() - t0)) print() # Fit the NMF model print("Fitting the NMF model (Frobenius norm) with tf-idf features, " "n_samples=%d and n_features=%d..." % (n_samples, n_features)) t0 = time() nmf = NMF(n_components=n_components, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf) print("done in %0.3fs." % (time() - t0)) print("\nTopics in NMF model (Frobenius norm):") tfidf_feature_names = tfidf_vectorizer.get_feature_names() print_top_words(nmf, tfidf_feature_names, n_top_words) # Fit the NMF model print("Fitting the NMF model (generalized Kullback-Leibler divergence) with " "tf-idf features, n_samples=%d and n_features=%d..." 
% (n_samples, n_features)) t0 = time() nmf = NMF(n_components=n_components, random_state=1, beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1, l1_ratio=.5).fit(tfidf) print("done in %0.3fs." % (time() - t0)) print("\nTopics in NMF model (generalized Kullback-Leibler divergence):") tfidf_feature_names = tfidf_vectorizer.get_feature_names() print_top_words(nmf, tfidf_feature_names, n_top_words) print("Fitting LDA models with tf features, " "n_samples=%d and n_features=%d..." % (n_samples, n_features)) lda = LatentDirichletAllocation(n_components=n_components, max_iter=5, learning_method='online', learning_offset=50., random_state=0) t0 = time() lda.fit(tf) print("done in %0.3fs." % (time() - t0)) print("\nTopics in LDA model:") tf_feature_names = tf_vectorizer.get_feature_names() print_top_words(lda, tf_feature_names, n_top_words)
bsd-3-clause
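To see the topic/term extraction pattern from the example above without fetching the 20 newsgroups corpus, the sketch below applies the same components_.argsort() idiom to a tiny in-memory corpus. The four documents are made up purely for illustration, and the result is only a toy factorization, not a reproduction of the example's output.

# Minimal sketch: NMF topics on a tiny assumed corpus.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

docs = ["the cat sat on the mat",
        "dogs and cats are pets",
        "stock markets fell sharply today",
        "investors sold shares as markets dropped"]

tfidf_vectorizer = TfidfVectorizer(stop_words="english")
tfidf = tfidf_vectorizer.fit_transform(docs)

nmf = NMF(n_components=2, random_state=1).fit(tfidf)
feature_names = tfidf_vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
    # Top 3 terms per topic, same argsort slice as print_top_words above.
    top = [feature_names[i] for i in topic.argsort()[:-4:-1]]
    print("Topic #%d: %s" % (topic_idx, " ".join(top)))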
zfrenchee/pandas
pandas/core/frame.py
1
233731
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ from __future__ import division # pylint: disable=E1101,E1103 # pylint: disable=W0212,W0231,W0703,W0622 import functools import collections import itertools import sys import types import warnings from textwrap import dedent import numpy as np import numpy.ma as ma from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, maybe_cast_to_datetime, maybe_infer_to_datetimelike, maybe_convert_platform, maybe_downcast_to_dtype, invalidate_string_dtypes, coerce_to_dtypes, maybe_upcast_putmask, find_common_type) from pandas.core.dtypes.common import ( is_categorical_dtype, is_object_dtype, is_extension_type, is_datetimetz, is_datetime64_any_dtype, is_datetime64tz_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, is_integer, is_scalar, is_dtype_equal, needs_i8_conversion, _get_dtype_from_object, _ensure_float, _ensure_float64, _ensure_int64, _ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, is_sequence, is_named_tuple) from pandas.core.dtypes.missing import isna, notna from pandas.core.common import (_try_sort, _default_index, _values_from_object, _maybe_box_datetimelike, _dict_compat, standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, _ensure_index_from_sequences) from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series from pandas.core.categorical import Categorical import pandas.core.algorithms as algorithms from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat from pandas.compat import PY36 from pandas.compat.numpy import function as nv from pandas.util._decorators import (Appender, Substitution, rewrite_axis_style_signature) from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core import accessor import pandas.core.common as com import pandas.core.nanops as nanops import pandas.core.ops as ops import pandas.io.formats.format as fmt import pandas.io.formats.console as console from pandas.io.formats.printing import pprint_thing import pandas.plotting._core as gfx from pandas._libs import lib, algos as libalgos from pandas.core.config import get_option # --------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = dict( axes='index, columns', klass='DataFrame', axes_single_arg="{0 or 'index', 1 or 'columns'}", optional_by=""" by : str or list of str Name or list of names which refer to the axis items.""", versionadded_to_excel='', optional_labels="""labels : array-like, optional New labels / index to conform the axis specified by 'axis' to.""", optional_axis="""axis : int or str, optional Axis to target. 
Can be either the axis name ('index', 'columns') or number (0, 1).""", ) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame objects by performing a database-style join operation by columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame how : {'left', 'right', 'outer', 'inner'}, default 'inner' * left: use only keys from left frame, similar to a SQL left outer join; preserve key order * right: use only keys from right frame, similar to a SQL right outer join; preserve key order * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : boolean, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels right_index : boolean, default False Use the index from the right DataFrame as the join key. Same caveats as left_index sort : boolean, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword) suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively copy : boolean, default True If False, do not copy data unnecessarily indicator : boolean or string, default False If True, adds a column to output DataFrame called "_merge" with information on the source of each row. If string, column with information on source of each row will be added to output DataFrame, and column will be named value of string. Information column is Categorical-type and takes on a value of "left_only" for observations whose merge key only appears in 'left' DataFrame, "right_only" for observations whose merge key only appears in 'right' DataFrame, and "both" if the observation's merge key is found in both. validate : string, default None If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. .. 
versionadded:: 0.21.0 Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Examples -------- >>> A >>> B lkey value rkey value 0 foo 1 0 foo 5 1 bar 2 1 bar 6 2 baz 3 2 qux 7 3 foo 4 3 bar 8 >>> A.merge(B, left_on='lkey', right_on='rkey', how='outer') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 4 foo 5 2 bar 2 bar 6 3 bar 2 bar 8 4 baz 3 NaN NaN 5 NaN NaN qux 7 Returns ------- merged : DataFrame The output type will the be same as 'left', if it is a subclass of DataFrame. See also -------- merge_ordered merge_asof """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects index : Index or array-like Index to use for resulting frame. Will default to np.arange(n) if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to np.arange(n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... 
columns=['a', 'b', 'c', 'd', 'e']) >>> df2 a b c d e 0 2 8 8 3 4 1 4 2 9 0 9 2 1 0 7 8 0 3 5 1 7 1 3 4 6 0 2 4 2 See also -------- DataFrame.from_records : constructor from tuples, also record arrays DataFrame.from_dict : from dicts of Series, arrays, or dicts DataFrame.from_items : from sequence of (key, value) pairs pandas.read_csv, pandas.read_table, pandas.read_clipboard """ @property def _constructor(self): return DataFrame _constructor_sliced = Series _deprecations = NDFrame._deprecations | frozenset( ['sortlevel', 'get_value', 'set_value', 'from_csv']) @property def _constructor_expanddim(self): from pandas.core.panel import Panel return Panel def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = _masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data[mask] = fill_value else: data = data.copy() mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None) is not None: mgr = self._init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (list, types.GeneratorType)): if isinstance(data, types.GeneratorType): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = _to_arrays(data, columns, dtype=dtype) columns = _ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): index = _default_index(len(data[0])) else: index = _default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = self._init_dict({}, index, columns, dtype=dtype) elif isinstance(data, collections.Iterator): raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and dtype: %s' % e) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array((len(index), len(columns)), data, dtype=dtype) mgr = self._init_ndarray(values, index, columns, dtype=values.dtype, copy=False) else: raise ValueError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. 
Needs to handle a lot of exceptional cases. """ if columns is not None: columns = _ensure_index(columns) # GH10856 # raise ValueError if only scalars in dict if index is None: extract_index(list(data.values())) # prefilter if columns passed data = {k: v for k, v in compat.iteritems(data) if k in columns} if index is None: index = extract_index(list(data.values())) else: index = _ensure_index(index) arrays = [] data_names = [] for k in columns: if k not in data: # no obvious "empty" int column if dtype is not None and issubclass(dtype.type, np.integer): continue if dtype is None: # 1783 v = np.empty(len(index), dtype=object) elif np.issubdtype(dtype, np.flexible): v = np.empty(len(index), dtype=object) else: v = np.empty(len(index), dtype=dtype) v.fill(np.nan) else: v = data[k] data_names.append(k) arrays.append(v) else: keys = list(data.keys()) if not isinstance(data, OrderedDict): keys = _try_sort(keys) columns = data_names = Index(keys) arrays = [data[k] for k in keys] return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) def _init_ndarray(self, values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index if isinstance(values, Series): if columns is None: if values.name is not None: columns = [values.name] if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # helper to create the axes as indexes def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: index = _default_index(N) else: index = _ensure_index(index) if columns is None: columns = _default_index(K) else: columns = _ensure_index(columns) return index, columns # we could have a categorical type passed or coerced to 'category' # recast this to an _arrays_to_mgr if (is_categorical_dtype(getattr(values, 'dtype', None)) or is_categorical_dtype(dtype)): if not hasattr(values, 'dtype'): values = _prep_ndarray(values, copy=copy) values = values.ravel() elif copy: values = values.copy() index, columns = _get_axes(len(values), 1) return _arrays_to_mgr([values], columns, index, columns, dtype=dtype) elif is_datetimetz(values): return self._init_dict({0: values}, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype values = _prep_ndarray(values, copy=copy) if dtype is not None: if not is_dtype_equal(values.dtype, dtype): try: values = values.astype(dtype) except Exception as orig: e = ValueError("failed to cast to '%s' (Exception was: %s)" % (dtype, orig)) raise_with_traceback(e) index, columns = _get_axes(*values.shape) values = values.T # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type if dtype is None and is_object_dtype(values): values = maybe_infer_to_datetimelike(values) return create_block_manager_from_blocks([values], [columns, index]) @property def axes(self): """ Return a list with the row axis labels and column axis labels as the only members. They are returned in that order. """ return [self.index, self.columns] @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. """ return len(self.index), len(self.columns) def _repr_fits_vertical_(self): """ Check length against max_rows. 
""" max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case off non-interactive session, no boundaries apply. ignore_width is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not com.in_interactive_session(): return True if (get_option('display.width') is not None or com.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(l) for l in value.split('\n')) return repr_width < width def _info_repr(self): """True if the repr should show the info view.""" info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not (self._repr_fits_horizontal_() and self._repr_fits_vertical_()) def __unicode__(self): """ Return a string representation for a particular DataFrame Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. """ buf = StringIO(u("")) if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ # qtconsole doesn't report its line width, and also # behaves badly when outputting an HTML table # that doesn't fit the window, so disable it. # XXX: In IPython 3.x and above, the Qt console will not attempt to # display HTML, so this check can be removed when support for # IPython 2.x is no longer needed. if com.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None if self._info_repr(): buf = StringIO(u("")) self.info(buf=buf) # need to escape the <class>, should be the first line. 
val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation fo the DataFrame. See Also -------- pandas.io.formats.style.Styler """ from pandas.io.formats.style import Styler return Styler(self) def iteritems(self): """ Iterator over (column name, Series) pairs. See also -------- iterrows : Iterate over DataFrame rows as (index, Series) pairs. itertuples : Iterate over DataFrame rows as namedtuples of the values. """ if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. Returns ------- it : generator A generator that iterates over the rows of the frame. See also -------- itertuples : Iterate over DataFrame rows as namedtuples of the values. iteritems : Iterate over (column name, Series) pairs. """ columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples, with index value as first element of the tuple. Parameters ---------- index : boolean, default True If True, return the index as the first element of the tuple. name : string, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. See also -------- iterrows : Iterate over DataFrame rows as (index, Series) pairs. iteritems : Iterate over (column name, Series) pairs. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b']) >>> df col1 col2 a 1 0.1 b 2 0.2 >>> for row in df.itertuples(): ... print(row) ... 
Pandas(Index='a', col1=1, col2=0.10000000000000001) Pandas(Index='b', col1=2, col2=0.20000000000000001) """ arrays = [] fields = [] if index: arrays.append(self.index) fields.append("Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor, and # things get slow with this many fields in Python 2 if name is not None and len(self.columns) + index < 256: # `rename` is unsupported in Python 2.6 try: itertuple = collections.namedtuple(name, fields + list(self.columns), rename=True) return map(itertuple._make, zip(*arrays)) except Exception: pass # fallback to regular tuples return zip(*arrays) items = iteritems def __len__(self): """Returns length of info axis, but here we use the index """ return len(self.index) def dot(self, other): """ Matrix multiplication with DataFrame or Series objects Parameters ---------- other : DataFrame or Series Returns ------- dot_product : DataFrame or Series """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient='columns', dtype=None): """ Construct DataFrame from dict of array-like or dicts Parameters ---------- data : dict {field : array-like} or {field : dict} orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer Returns ------- DataFrame """ index, columns = None, None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient != 'columns': # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) def to_dict(self, orient='dict', into=dict): """Convert DataFrame to dictionary. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. 
- dict (default) : dict like {column -> {index -> value}} - list : dict like {column -> [values]} - series : dict like {column -> Series(values)} - split : dict like {index -> [index], columns -> [columns], data -> [values]} - records : list like [{column -> value}, ... , {column -> value}] - index : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- result : collections.Mapping like {column -> {index -> value}} Examples -------- >>> df = pd.DataFrame( {'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b']) >>> df col1 col2 a 1 0.1 b 2 0.2 >>> df.to_dict() {'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': a 1 b 2 Name: col1, dtype: int64, 'col2': a 0.50 b 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'columns': ['col1', 'col2'], 'data': [[1.0, 0.5], [2.0, 0.75]], 'index': ['a', 'b']} >>> df.to_dict('records') [{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}] >>> df.to_dict('index') {'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])), ('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}), defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), _maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): return into_c((k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [into_c((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): return into_c((k, v.to_dict(into)) for k, v in self.iterrows()) else: raise ValueError("orient '%s' not understood" % orient) def to_gbq(self, destination_table, project_id, chunksize=10000, verbose=True, reauth=False, if_exists='fail', private_key=None): """Write a DataFrame to a Google BigQuery table. The main method a user calls to export pandas DataFrame contents to Google BigQuery table. Google BigQuery API Client Library v2 for Python is used. Documentation is available `here <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__ Authentication to the Google BigQuery service is via OAuth 2.0. - If "private_key" is not provided: By default "application default credentials" are used. 
If default application credentials are not found or are restrictive, user account credentials are used. In this case, you will be asked to grant permissions for product name 'pandas GBQ'. - If "private_key" is provided: Service account credentials will be used to authenticate. Parameters ---------- dataframe : DataFrame DataFrame to be written destination_table : string Name of table to be written, in the form 'dataset.tablename' project_id : str Google BigQuery Account project ID. chunksize : int (default 10000) Number of rows to be inserted in each chunk from the dataframe. verbose : boolean (default True) Show percentage complete reauth : boolean (default False) Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. if_exists : {'fail', 'replace', 'append'}, default 'fail' 'fail': If table exists, do nothing. 'replace': If table exists, drop it, recreate it, and insert data. 'append': If table exists, insert data. Create if does not exist. private_key : str (optional) Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. jupyter iPython notebook on remote host) """ from pandas.io import gbq return gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, verbose=verbose, reauth=reauth, if_exists=if_exists, private_key=private_key) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): """ Convert structured or record ndarray to DataFrame Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. 
Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets Returns ------- df : DataFrame """ # Make a copy of the input columns so we can modify it if columns is not None: columns = _ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = _ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in compat.iteritems(data): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = _reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) if columns is not None: columns = _ensure_index(columns) arr_columns = columns else: arrays, arr_columns = _to_arrays(data, columns, coerce_float=coerce_float) arr_columns = _ensure_index(arr_columns) if columns is not None: columns = _ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, compat.string_types) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: to_remove = [arr_columns.get_loc(field) for field in index] index_data = [arrays[i] for i in to_remove] result_index = _ensure_index_from_sequences(index_data, names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=True): """ Convert DataFrame to record array. Index will be put in the 'index' field of the record array if requested Parameters ---------- index : boolean, default True Include index in resulting record array, stored in 'index' field convert_datetime64 : boolean, default True Whether to convert the index to datetime.datetime if it is a DatetimeIndex Returns ------- y : recarray """ if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
copy copy copy ix_vals = lmap(np.array, zip(*self.index.values)) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = (lmap(compat.text_type, index_names) + lmap(compat.text_type, self.columns)) else: arrays = [self[c].get_values() for c in self.columns] names = lmap(compat.text_type, self.columns) formats = [v.dtype for v in arrays] return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} ) @classmethod def from_items(cls, items, columns=None, orient='columns'): """ Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- frame : DataFrame """ keys, values = lzip(*items) if orient == 'columns': if columns is not None: columns = _ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(_ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = _ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = _ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): """ Read CSV file (DEPRECATED, please use :func:`pandas.read_csv` instead). It is preferable to use the more powerful :func:`pandas.read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. 
        This method only differs from the preferred :func:`pandas.read_csv`
        in some defaults:

        - `index_col` is ``0`` instead of ``None`` (take first column as index
          by default)
        - `parse_dates` is ``True`` instead of ``False`` (try parsing the index
          as datetime by default)

        So a ``pd.DataFrame.from_csv(path)`` can be replaced by
        ``pd.read_csv(path, index_col=0, parse_dates=True)``.

        Parameters
        ----------
        path : string file path or file handle / StringIO
        header : int, default 0
            Row to use as header (skip prior rows)
        sep : string, default ','
            Field delimiter
        index_col : int or sequence, default 0
            Column to use for index. If a sequence is given, a MultiIndex
            is used. Different default from read_table
        parse_dates : boolean, default True
            Parse dates. Different default from read_table
        tupleize_cols : boolean, default False
            write multi_index columns as a list of tuples (if True) or in the
            new, expanded format (if False)
        infer_datetime_format : boolean, default False
            If True and `parse_dates` is True for a column, try to infer the
            datetime format based on the first datetime string. If the format
            can be inferred, there often will be a large parsing speed-up.

        See also
        --------
        pandas.read_csv

        Returns
        -------
        y : DataFrame
        """
        warnings.warn("from_csv is deprecated. Please use read_csv(...) "
                      "instead. Note that some of the default arguments are "
                      "different, so please refer to the documentation "
                      "for from_csv when changing your function calls",
                      FutureWarning, stacklevel=2)

        from pandas.io.parsers import read_table
        return read_table(path, header=header, sep=sep,
                          parse_dates=parse_dates, index_col=index_col,
                          encoding=encoding, tupleize_cols=tupleize_cols,
                          infer_datetime_format=infer_datetime_format)

    def to_sparse(self, fill_value=None, kind='block'):
        """
        Convert to SparseDataFrame

        Parameters
        ----------
        fill_value : float, default NaN
        kind : {'block', 'integer'}

        Returns
        -------
        y : SparseDataFrame
        """
        from pandas.core.sparse.frame import SparseDataFrame
        return SparseDataFrame(self._series, index=self.index,
                               columns=self.columns, default_kind=kind,
                               default_fill_value=fill_value)

    def to_panel(self):
        """
        Transform long (stacked) format (DataFrame) into wide (3D, Panel)
        format.

        .. deprecated:: 0.20.0

        Currently the index of the DataFrame must be a 2-level MultiIndex.
This may be generalized later Returns ------- panel : Panel """ # only support this kind for now if (not isinstance(self.index, MultiIndex) or # pragma: no cover len(self.index.levels) != 2): raise NotImplementedError('Only 2-level MultiIndex are supported.') if not self.index.is_unique: raise ValueError("Can't convert non-uniquely indexed " "DataFrame to Panel") self._consolidate_inplace() # minor axis must be sorted if self.index.lexsort_depth < 2: selfsorted = self.sort_index(level=0) else: selfsorted = self major_axis, minor_axis = selfsorted.index.levels major_labels, minor_labels = selfsorted.index.labels shape = len(major_axis), len(minor_axis) # preserve names, if any major_axis = major_axis.copy() major_axis.name = self.index.names[0] minor_axis = minor_axis.copy() minor_axis.name = self.index.names[1] # create new axes new_axes = [selfsorted.columns, major_axis, minor_axis] # create new manager new_mgr = selfsorted._data.reshape_nd(axes=new_axes, labels=[major_labels, minor_labels], shape=shape, ref_items=selfsorted.columns) return self._constructor_expanddim(new_mgr) def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, compression=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'): r"""Write DataFrame to a comma-separated values (csv) file Parameters ---------- path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. sep : character, default ',' Field delimiter for the output file. na_rep : string, default '' Missing data representation float_format : string, default None Format string for floating point numbers columns : sequence, optional Columns to write header : boolean or list of string, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R mode : str Python write mode, default 'w' encoding : string, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. compression : string, optional a string representing the compression to use in the output file, allowed values are 'gzip', 'bz2', 'xz', only used when the first argument is a filename line_terminator : string, default ``'\n'`` The newline character or character sequence to use in the output file quoting : optional constant from csv module defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric quotechar : string (length 1), default '\"' character used to quote fields doublequote : boolean, default True Control quoting of `quotechar` inside a field escapechar : string (length 1), default None character used to escape `sep` and `quotechar` when appropriate chunksize : int or None rows to write at a time tupleize_cols : boolean, default False .. 
deprecated:: 0.21.0 This argument will be removed and will always write each row of the multi-index as a separate row in the CSV file. Write MultiIndex columns as a list of tuples (if True) or in the new, expanded format, where each MultiIndex column is a row in the CSV (if False). date_format : string, default None Format string for datetime objects decimal: string, default '.' Character recognized as decimal separator. E.g. use ',' for European data """ if tupleize_cols is not None: warnings.warn("The 'tupleize_cols' parameter is deprecated and " "will be removed in a future version", FutureWarning, stacklevel=2) else: tupleize_cols = False formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() @Appender(_shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf', verbose=True, freeze_panes=None): from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep) formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine) def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None): """ A class for writing Stata binary dta files from array-like objects Parameters ---------- fname : str or buffer String path of file-like object convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder` time_stamp : datetime A datetime to use as file creation date. Default is the current time. dataset_label : str A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. 
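# A minimal to_csv usage sketch (illustrative only; the toy frame and file
# name below are assumptions, not part of the pandas sources):
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': [3.5, None]})
#   >>> df.to_csv('out.csv', index=False, na_rep='NA')   # write to a file
#   >>> csv_text = df.to_csv(index=False)                # path_or_buf=None -> str
#   >>> csv_text.splitlines()[0]
#   'a,b'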
versionadded:: 0.19.0 Examples -------- >>> writer = StataWriter('./data_file.dta', data) >>> writer.write_file() Or with dates >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'}) >>> writer.write_file() """ from pandas.io.stata import StataWriter writer = StataWriter(fname, self, convert_dates=convert_dates, encoding=encoding, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels) writer.write_file() def to_feather(self, fname): """ write out the binary feather-format for DataFrames .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname) def to_parquet(self, fname, engine='auto', compression='snappy', **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 Parameters ---------- fname : str string file path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first library to be installed is used. compression : str, optional, default 'snappy' compression method, includes {'gzip', 'snappy', 'brotli'} kwargs Additional keyword arguments passed to the engine """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, **kwargs) @Substitution(header='Write out the column names. If a list of strings ' 'is given, it is assumed to be aliases for the ' 'column names') @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame to a console-friendly tabular output. """ formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, line_width=line_width, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result @Substitution(header='whether to print column labels, default True') @Appender(fmt.docstring_to_string, indents=1) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, bold_rows=True, classes=None, escape=True, max_rows=None, max_cols=None, show_dimensions=False, notebook=False, decimal='.', border=None): """ Render a DataFrame as an HTML table. `to_html`-specific options: bold_rows : boolean, default True Make the row labels bold in the output classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table escape : boolean, default True Convert the characters <, >, and & to HTML-safe sequences. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. max_cols : int, optional Maximum number of columns to show before truncating. If None, show all. decimal : string, default '.' Character recognized as decimal separator, e.g. ',' in Europe .. versionadded:: 0.18.0 border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. .. 
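# A hedged sketch of the binary writers defined above (assumes the optional
# pyarrow or fastparquet dependency is installed; file names are placeholders):
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
#   >>> df.to_parquet('df.parquet', engine='auto', compression='snappy')
#   >>> pd.read_parquet('df.parquet')      # round-trips the frame
#   >>> df.to_feather('df.feather')        # needs pyarrow / feather support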
versionadded:: 0.19.0 """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue() def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Concise summary of a DataFrame. Parameters ---------- verbose : {None, True, False}, optional Whether to print the full summary. None follows the `display.max_info_columns` setting. True or False overrides the `display.max_info_columns` setting. buf : writable buffer, defaults to sys.stdout max_cols : int, default None Determines whether full summary or short summary is printed. None follows the `display.max_info_columns` setting. memory_usage : boolean/string, default None Specifies whether total memory usage of the DataFrame elements (including index) should be displayed. None follows the `display.memory_usage` setting. True or False overrides the `display.memory_usage` setting. A value of 'deep' is equivalent of True, with deep introspection. Memory usage is shown in human-readable units (base-2 representation). null_counts : boolean, default None Whether to show the non-null counts - If None, then only show if the frame is smaller than max_info_rows and max_info_columns. - If True, always show counts. - If False, never show counts. 
""" from pandas.io.formats.format import _put_lines if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index.summary()) if len(self.columns) == 0: lines.append('Empty %s' % type(self).__name__) _put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option('display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max(len(pprint_thing(k)) for k in self.columns) + 4 counts = None tmpl = "%s%s" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError('Columns must equal counts (%d != %d)' % (len(cols), len(counts))) tmpl = "%s non-null %s" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes.iloc[i] col = pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl % (count, dtype)) def _non_verbose_repr(): lines.append(self.columns.summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return "%3.1f%s %s" % (num, size_qualifier, x) num /= 1024.0 return "%3.1f%s %s" % (num, size_qualifier, 'PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] lines.append('dtypes: %s' % ', '.join(dtypes)) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display size_qualifier = '' if memory_usage == 'deep': deep = True else: # size_qualifier is just a best effort; not guaranteed to catch # all cases (e.g., it misses categorical data even with object # categories) deep = False if ('object' in counts or self.index._is_memory_usage_qualified()): size_qualifier = '+' mem_usage = self.memory_usage(index=True, deep=deep).sum() lines.append("memory usage: %s\n" % _sizeof_fmt(mem_usage, size_qualifier)) _put_lines(buf, lines) def memory_usage(self, index=True, deep=False): """Memory usage of DataFrame columns. Parameters ---------- index : bool Specifies whether to include memory usage of DataFrame's index in returned Series. If `index=True` (default is False) the first index of the Series is `Index`. deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- sizes : Series A series with column names as index and memory usage of columns with units of bytes. 
Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result def transpose(self, *args, **kwargs): """Transpose index and columns""" nv.validate_transpose(args, dict()) return super(DataFrame, self).transpose(1, 0, **kwargs) T = property(transpose) # ---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = _unpickle_array(cols) index = _unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array # old unpickling (vals, idx, cols), object_state = state index = _unpickle_array(idx) dm = DataFrame(vals, index=index, columns=_unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=_unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data # ---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- value : scalar value """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable) def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) return _maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine try: return engine.get_value(series._values, index) except (TypeError, ValueError): # we cannot handle direct indexing # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) return self._get_value(index, col, takeable=True) _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : row label col : column label value : scalar value takeable : interpret the index/col as indexers, default False Returns ------- frame : DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable) def _set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) return series._set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series._values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache self.loc[index, col] = value self._item_cache.pop(col, None) return self _set_value.__doc__ = set_value.__doc__ def _ixs(self, i, axis=0): """ i : int, slice, or sequence of integers axis : int """ # irow if axis == 0: """ Notes ----- If slice passed, the resulting data will be a view """ if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy = True else: new_values = self._data.fast_xs(i) if is_scalar(new_values): return new_values # if we are a copy, mark as such copy = (isinstance(new_values, np.ndarray) and new_values.base is None) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: """ Notes ----- If slice passed, the resulting data will be a view """ label = self.columns[i] if isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.loc[:, lab_slice] else: if isinstance(label, Index): return self._take(i, axis=1, convert=True) index_len = len(self.index) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. 
This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if index_len and not len(values): values = np.array([np.nan] * index_len, dtype=object) result = self._constructor_sliced._from_array( values, index=self.index, name=label, fastpath=True) # this is a cached value, mark it so result._set_as_cached(label, self) return result def __getitem__(self, key): key = com._apply_if_callable(key, self) # shortcut if we are an actual column is_mi_columns = isinstance(self.columns, MultiIndex) try: if key in self.columns and not is_mi_columns: return self._getitem_column(key) except: pass # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._getitem_slice(indexer) if isinstance(key, (Series, np.ndarray, Index, list)): # either boolean or fancy integer index return self._getitem_array(key) elif isinstance(key, DataFrame): return self._getitem_frame(key) elif is_mi_columns: return self._getitem_multilevel(key) else: return self._getitem_column(key) def _getitem_column(self, key): """ return the actual column """ # get column if self.columns.is_unique: return self._get_item_cache(key) # duplicate columns & possible reduce dimensionality result = self._constructor(self._data.get(key)) if result.columns.is_unique: result = result[key] return result def _getitem_slice(self, key): return self._slice(key, axis=0) def _getitem_array(self, key): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning, stacklevel=3) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' % (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take(indexer, axis=0, convert=False) else: indexer = self.loc._convert_to_indexer(key, axis=1) return self._take(indexer, axis=1, convert=True) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor(new_values, index=self.index, columns=result_columns) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. 
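# __getitem__ above dispatches on the type of the key; a brief sketch of the
# common selection forms (toy frame is an assumption):
#
#   >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
#   >>> df['a']             # single label -> Series
#   >>> df[['a', 'b']]      # list of labels -> DataFrame
#   >>> df[df['a'] > 1]     # boolean Series -> row filter, aligned on the index
#   >>> df[1:3]             # slice -> selects rows, not columns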
if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.size and not is_bool_dtype(key.values): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, inplace=False, **kwargs): """Query the columns of a frame with a boolean expression. Parameters ---------- expr : string The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. inplace : bool Whether the query should modify the data in place or return a modified copy .. versionadded:: 0.18.0 kwargs : dict See the documentation for :func:`pandas.eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- q : DataFrame Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`pandas.eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. See Also -------- pandas.eval DataFrame.eval Examples -------- >>> from numpy.random import randn >>> from pandas import DataFrame >>> df = pd.DataFrame(randn(10, 2), columns=list('ab')) >>> df.query('a > b') >>> df[df.a > df.b] # same result as the previous expression """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, compat.string_types): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data def eval(self, expr, inplace=False, **kwargs): """Evaluate an expression in the context of the calling DataFrame instance. Parameters ---------- expr : string The expression string to evaluate. 
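# A minimal query()/eval() sketch (the frame and the @-referenced variable are
# assumptions; '@name' pulls `name` from the surrounding Python scope):
#
#   >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
#   >>> limit = 1
#   >>> df.query('a > @limit and a > b')    # same as df[(df.a > limit) & (df.a > df.b)]
#   >>> df.eval('c = a + b', inplace=True)  # adds column 'c' in place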
inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0 kwargs : dict See the documentation for :func:`~pandas.eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ret : ndarray, scalar, or pandas object See Also -------- pandas.DataFrame.query pandas.DataFrame.assign pandas.eval Notes ----- For more details see the API documentation for :func:`~pandas.eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> from numpy.random import randn >>> from pandas import DataFrame >>> df = pd.DataFrame(randn(10, 2), columns=list('ab')) >>> df.eval('a + b') >>> df.eval('c = a + b') """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() resolvers = dict(self.iteritems()), index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None): """Return a subset of a DataFrame including/excluding columns based on their ``dtype``. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Returns ------- subset : DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Notes ----- * To select all *numeric* types use the numpy dtype ``numpy.number`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use np.datetime64, 'datetime' or 'datetime64' * To select timedeltas, use np.timedelta64, 'timedelta' or 'timedelta64' * To select Pandas categorical dtypes, use 'category' * To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0), or a 'datetime64[ns, tz]' string Examples -------- >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'), ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 0.3962 True 1 1 0.1459 False 2 2 0.2623 True 1 3 0.0764 False 2 4 -0.9703 True 1 5 -1.2094 False 2 >>> df.select_dtypes(include='bool') c 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1 1 2 2 1 3 2 4 1 5 2 >>> df.select_dtypes(exclude=['floating']) b 0 True 1 False 2 True 3 False 4 True 5 False """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(_get_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on %s' % (include & exclude)) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(column, dtype): return column, functools.partial(issubclass, dtype.type) for column, f in itertools.starmap(is_dtype_instance_mapper, self.dtypes.iteritems()): if include: # checks for the case of empty include or exclude include_these[column] = any(map(f, include)) if exclude: exclude_these[column] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[com._get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): """ provide boxed values for a column """ return self._constructor_sliced._from_array(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): key = com._apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.loc._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' 
% (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.loc._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.loc._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.loc._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError( 'Array conditional must be same shape as self' ) key = self._constructor(key, **self._construct_axes_dict()) if key.values.size and not is_bool_dtype(key.values): raise TypeError( 'Must pass DataFrame or 2-d ndarray with boolean values only' ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _ensure_valid_index(self, value): """ ensure that if we don't have an index, that we can create one from the passed value """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except: raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): r""" Assign new columns to a DataFrame, returning a new object (a copy) with all the original columns in addition to the new ones. Parameters ---------- kwargs : keyword, value pairs keywords are the column names. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- df : DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- For python 3.6 and above, the columns are inserted in the order of \*\*kwargs. For python 3.5 and earlier, since \*\*kwargs is unordered, the columns are inserted in alphabetical order at the end of your DataFrame. 
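# A small sketch of insert(), which places a column at a given position in
# place (column values below are assumptions):
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'c': [5, 6]})
#   >>> df.insert(1, 'b', [3, 4])    # columns are now ['a', 'b', 'c']
#   # pass allow_duplicates=True to insert a label that already exists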
Assigning multiple columns within the same ``assign`` is possible, but you cannot reference other columns created within the same ``assign`` call. Examples -------- >>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) Where the value is a callable, evaluated on `df`: >>> df.assign(ln_A = lambda x: np.log(x.A)) A B ln_A 0 1 0.426905 0.000000 1 2 -0.780949 0.693147 2 3 -0.418711 1.098612 3 4 -0.269708 1.386294 4 5 -0.274002 1.609438 5 6 -0.500792 1.791759 6 7 1.649697 1.945910 7 8 -1.495604 2.079442 8 9 0.549296 2.197225 9 10 -0.758542 2.302585 Where the value already exists and is inserted: >>> newcol = np.log(df['A']) >>> df.assign(ln_A=newcol) A B ln_A 0 1 0.426905 0.000000 1 2 -0.780949 0.693147 2 3 -0.418711 1.098612 3 4 -0.269708 1.386294 4 5 -0.274002 1.609438 5 6 -0.500792 1.791759 6 7 1.649697 1.945910 7 8 -1.495604 2.079442 8 9 0.549296 2.197225 9 10 -0.758542 2.302585 """ data = self.copy() # do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com._apply_if_callable(v, data) # preserve order for 3.6 and later, but sort by key for 3.5 and earlier if PY36: results = results.items() else: results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data def _sanitize_column(self, key, value, broadcast=True): """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. Parameters ---------- key : object value : scalar, Series, or array-like broadcast : bool, default True If ``key`` matches multiple duplicate column names in the DataFrame, this parameter indicates whether ``value`` should be tiled so that the returned array contains a (duplicated) column for each occurrence of the key. If False, ``value`` will not be tiled. 
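# As the note above states, a column created inside assign() cannot be
# referenced within the same call; a hedged sketch of chaining instead
# (the frame is an assumption):
#
#   >>> df = pd.DataFrame({'a': [1, 2, 3]})
#   >>> df.assign(b=lambda x: x.a * 2).assign(c=lambda x: x.a + x.b)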
Returns ------- sanitized_column : numpy-array """ def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value._values.copy() else: # GH 4107 try: value = value.reindex(self.index)._values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, Categorical): value = value.copy() elif isinstance(value, Index) or is_sequence(value): from pandas.core.series import _sanitize_index # turn me into an ndarray value = _sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: value = com._asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): value = value.copy(deep=True) else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = maybe_infer_to_datetimelike(value) else: # upcast the scalar value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, value.dtype) # return internal types directly if is_extension_type(value): return value # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: if (not self.columns.is_unique or isinstance(self.columns, MultiIndex)): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): result = {} for idx, item in enumerate(self.columns): result[item] = Series(self._data.iget(idx), index=self.index, name=item) return result def lookup(self, row_labels, col_labels): """Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. 
        Parameters
        ----------
        row_labels : sequence
            The row labels to use for lookup
        col_labels : sequence
            The column labels to use for lookup

        Notes
        -----
        Akin to::

            result = []
            for row, col in zip(row_labels, col_labels):
                result.append(df.get_value(row, col))

        Returns
        -------
        values : ndarray
            The found values
        """
        n = len(row_labels)
        if n != len(col_labels):
            raise ValueError('Row labels must have same size as column labels')

        thresh = 1000
        if not self._is_mixed_type or n > thresh:
            values = self.values
            ridx = self.index.get_indexer(row_labels)
            cidx = self.columns.get_indexer(col_labels)
            if (ridx == -1).any():
                raise KeyError('One or more row labels was not found')
            if (cidx == -1).any():
                raise KeyError('One or more column labels was not found')
            flat_index = ridx * len(self.columns) + cidx
            result = values.flat[flat_index]
        else:
            result = np.empty(n, dtype='O')
            for i, (r, c) in enumerate(zip(row_labels, col_labels)):
                result[i] = self._get_value(r, c)

        if is_object_dtype(result):
            result = lib.maybe_convert_objects(result)

        return result

    # ----------------------------------------------------------------------
    # Reindexing and alignment

    def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
                      copy):
        frame = self

        columns = axes['columns']
        if columns is not None:
            frame = frame._reindex_columns(columns, method, copy, level,
                                           fill_value, limit, tolerance)

        index = axes['index']
        if index is not None:
            frame = frame._reindex_index(index, method, copy, level,
                                         fill_value, limit, tolerance)

        return frame

    def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
                       limit=None, tolerance=None):
        new_index, indexer = self.index.reindex(new_index, method=method,
                                                level=level, limit=limit,
                                                tolerance=tolerance)
        return self._reindex_with_indexers({0: [new_index, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)

    def _reindex_columns(self, new_columns, method, copy, level,
                         fill_value=np.nan, limit=None, tolerance=None):
        new_columns, indexer = self.columns.reindex(new_columns, method=method,
                                                    level=level, limit=limit,
                                                    tolerance=tolerance)
        return self._reindex_with_indexers({1: [new_columns, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)

    def _reindex_multi(self, axes, copy, fill_value):
        """ we are guaranteed non-Nones in the axes!
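# A hedged lookup() sketch (labels are assumptions); each (row, col) label pair
# is resolved against the index and columns and the matching value returned:
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
#   >>> df.lookup(['x', 'y'], ['a', 'b'])
#   array([1, 4])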
""" new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): return super(DataFrame, self).align(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) @rewrite_axis_style_signature('labels', [('method', None), ('copy', True), ('level', None), ('fill_value', np.nan), ('limit', None), ('tolerance', None)]) def reindex(self, *args, **kwargs): axes = validate_axis_style_args(self, args, kwargs, 'labels', 'reindex') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('labels', None) return super(DataFrame, self).reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super(DataFrame, self).reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), ('level', None)]) def rename(self, *args, **kwargs): """Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper, index, columns : dict-like or function, optional dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. axis : int or str, optional Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : boolean, default True Also copy underlying data inplace : boolean, default False Whether to return a new %(klass)s. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. Returns ------- renamed : DataFrame See Also -------- pandas.DataFrame.rename_axis Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super(DataFrame, self).rename(**kwargs) @Appender(_shared_docs['fillna'] % _shared_doc_kwargs) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): return super(DataFrame, self).fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, **kwargs) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0): return super(DataFrame, self).shift(periods=periods, freq=freq, axis=axis) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. Parameters ---------- keys : column label or list of column labels / arrays drop : boolean, default True Delete columns to be used as the new index append : boolean, default False Whether to append columns to existing index inplace : boolean, default False Modify the DataFrame in place (do not create a new object) verify_integrity : boolean, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 
'sale':[55, 40, 84, 31]}) month sale year 0 1 55 2012 1 4 40 2014 2 7 84 2013 3 10 31 2014 Set the index to become the 'month' column: >>> df.set_index('month') sale year month 1 55 2012 4 40 2014 7 84 2013 10 31 2014 Create a multi-index using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a multi-index using a set of values and a column: >>> df.set_index([[1, 2, 3, 4], 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Returns ------- dataframe : DataFrame """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(keys, list): keys = [keys] if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x for x in self.index.names] if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, MultiIndex): # append all but the last column so we don't have to modify # the end of this loop for n in range(col.nlevels - 1): arrays.append(col._get_level_values(n)) level = col._get_level_values(col.nlevels - 1) names.extend(col.names) elif isinstance(col, Series): level = col._values names.append(col.name) elif isinstance(col, Index): level = col names.append(col.name) elif isinstance(col, (list, np.ndarray, Index)): level = col names.append(None) else: level = frame[col]._values names.append(col) if drop: to_remove.append(col) arrays.append(level) index = _ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index.get_duplicates() raise ValueError('Index has duplicate keys: %s' % duplicates) for c in to_remove: del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default drop : boolean, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : boolean, default False Modify the DataFrame in place (do not create a new object) col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- resetted : DataFrame Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... 
columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): if isinstance(index, PeriodIndex): values = index.astype(object).values elif isinstance(index, DatetimeIndex) and index.tz is not None: values = index else: values = index.values if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.all(): values = np.empty(len(mask)) values.fill(np.nan) else: values = values.take(labels) if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) return values new_index = _default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if isinstance(self.index, MultiIndex): if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): names = [n if n is not None else ('level_%d' % i) for (i, n) in enumerate(self.index.names)] to_insert = lzip(self.index.levels, self.index.labels) else: default = 'index' if 'index' not in self else 'level_0' names = ([default] if self.index.name is None 
else [self.index.name]) to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue name = names[i] if multi_col: col_name = (list(name) if isinstance(name, tuple) else [name]) if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError("col_fill=None is incompatible " "with incomplete column name " "{}".format(name)) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj # ---------------------------------------------------------------------- # Reindex-based selection methods @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return super(DataFrame, self).isna() @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): return super(DataFrame, self).isnull() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return super(DataFrame, self).notna() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): return super(DataFrame, self).notnull() def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Return object with labels on given axis omitted where alternately any or all of the data are missing Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof Pass tuple or list to drop on multiple axes how : {'any', 'all'} * any : if any NA values are present, drop that label * all : if all values are NA, drop that label thresh : int, default None int value : require that many non-NA values subset : array-like Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include inplace : boolean, default False If True, do operation inplace and return None. Returns ------- dropped : DataFrame Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, 5]], ... 
columns=list('ABCD')) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 Drop the columns where all elements are nan: >>> df.dropna(axis=1, how='all') A B D 0 NaN 2.0 0 1 3.0 4.0 1 2 NaN NaN 5 Drop the columns where any of the elements is nan >>> df.dropna(axis=1, how='any') D 0 0 1 1 2 5 Drop the rows where all of the elements are nan (there is no row to drop, so df stays the same): >>> df.dropna(axis=0, how='all') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 Keep only the rows with at least 2 non-na values: >>> df.dropna(thresh=2) A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: %s' % how) else: raise TypeError('must specify how or thresh') result = self._take(mask.nonzero()[0], axis=axis, convert=False) if inplace: self._update_inplace(result) else: return result def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- deduplicated : DataFrame """ inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated).nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. 
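# --- Illustrative sketch, not part of the original source: how the ``keep``
# option of ``duplicated`` / ``drop_duplicates`` changes which rows are
# flagged. The frame and column names below are hypothetical examples.
import pandas as pd

dup_df = pd.DataFrame({'k': ['a', 'a', 'b'], 'v': [1, 1, 2]})

dup_df.duplicated(subset=['k'], keep='first')   # [False, True, False]: first 'a' kept
dup_df.duplicated(subset=['k'], keep='last')    # [True, False, False]: last 'a' kept
dup_df.duplicated(subset=['k'], keep=False)     # [True, True, False]: every 'a' row flagged

dup_df.drop_duplicates(subset=['k'], keep='last')  # keeps rows 1 and 2
# --- end sketch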
Returns ------- duplicated : Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, compat.string_types) or isinstance(subset, tuple) and subset in self.columns): subset = subset, vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index) # ---------------------------------------------------------------------- # Sorting @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) other_axis = 0 if axis == 1 else 1 if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by (%d)' % (len(ascending), len(by))) if len(by) > 1: from pandas.core.sorting import lexsort_indexer keys = [] for x in by: k = self.xs(x, axis=other_axis).values if k.ndim == 2: raise ValueError('Cannot sort by duplicate column %s' % str(x)) keys.append(k) indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = _ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort by = by[0] k = self.xs(by, axis=other_axis).values if k.ndim == 2: # try to be helpful if isinstance(self.columns, MultiIndex): raise ValueError('Cannot sort by column %s in a ' 'multi-index you need to explicitly ' 'provide all the levels' % str(by)) raise ValueError('Cannot sort by duplicate column %s' % str(by)) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position) new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis), verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): # TODO: this can be combined with Series.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # 10726 if by is not None: warnings.warn("by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) axis = self._get_axis_number(axis) labels = self._get_axis(axis) if level: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer labels = labels._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_labels_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check 
monotonic-ness before sort an index # GH11080 if ((ascending and labels.is_monotonic_increasing) or (not ascending and labels.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) baxis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def sortlevel(self, level=0, axis=0, ascending=True, inplace=False, sort_remaining=True): """ DEPRECATED: use :meth:`DataFrame.sort_index` Sort multilevel index by chosen axis and primary level. Data will be lexicographically sorted by the chosen level followed by the other levels (in order) Parameters ---------- level : int axis : {0 or 'index', 1 or 'columns'}, default 0 ascending : boolean, default True inplace : boolean, default False Sort the DataFrame without creating a new instance sort_remaining : boolean, default True Sort by the other levels too. Returns ------- sorted : DataFrame See Also -------- DataFrame.sort_index(level=...) """ warnings.warn("sortlevel is deprecated, use sort_index(level= ...)", FutureWarning, stacklevel=2) return self.sort_index(level=level, axis=axis, ascending=ascending, inplace=inplace, sort_remaining=sort_remaining) def nlargest(self, n, columns, keep='first'): """Get the rows of a DataFrame sorted by the `n` largest values of `columns`. Parameters ---------- n : int Number of items to retrieve columns : list or str Column name or names to order by keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. Returns ------- DataFrame Examples -------- >>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1], ... 'b': list('abdce'), ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) >>> df.nlargest(3, 'a') a b c 3 11 c 3 1 10 b 2 2 8 d NaN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n, columns, keep='first'): """Get the rows of a DataFrame sorted by the `n` smallest values of `columns`. Parameters ---------- n : int Number of items to retrieve columns : list or str Column name or names to order by keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. Returns ------- DataFrame Examples -------- >>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1], ... 'b': list('abdce'), ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) >>> df.nsmallest(3, 'a') a b c 4 -1 e 4 0 1 a 1 2 8 d NaN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest() def swaplevel(self, i=-2, j=-1, axis=0): """ Swap levels i and j in a MultiIndex on a particular axis Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- swapped : type of caller (new object) .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ result = self.copy() axis = self._get_axis_number(axis) if axis == 0: result.index = result.index.swaplevel(i, j) else: result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order, axis=0): """ Rearrange index levels using input order. 
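# --- Illustrative sketch, not part of the original source: ``nlargest`` /
# ``nsmallest`` return the same rows as sorting by the column and taking the
# head, without a full sort. Data below is a hypothetical example.
import pandas as pd

top_df = pd.DataFrame({'a': [1, 10, 8, 11, -1]})

top_df.nlargest(3, 'a')                               # rows where a == 11, 10, 8
top_df.sort_values('a', ascending=False).head(3)      # same rows, via a full sort
top_df.nsmallest(2, 'a')                              # rows where a == -1, 1
# --- end sketch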
May not drop or duplicate levels Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : int Where to reorder levels. Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result # ---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None, try_cast=True): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns def _arith_op(left, right): if fill_value is not None: left_mask = isna(left) right_mask = isna(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return func(left, right) if this._is_mixed_type or other._is_mixed_type: # unique if this.columns.is_unique: def f(col): r = _arith_op(this[col].values, other[col].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = {col: f(col) for col in this} # non-unique else: def f(i): r = _arith_op(this.iloc[:, i].values, other.iloc[:, i].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = {i: f(i) for i, col in enumerate(this.columns)} result = self._constructor(result, index=new_index, copy=False) result.columns = new_columns return result else: result = _arith_op(this.values, other.values) return self._constructor(result, index=new_index, columns=new_columns, copy=False) def _combine_series(self, other, func, fill_value=None, axis=None, level=None, try_cast=True): if axis is not None: axis = self._get_axis_name(axis) if axis == 'index': return self._combine_match_index(other, func, level=level, fill_value=fill_value, try_cast=try_cast) else: return self._combine_match_columns(other, func, level=level, fill_value=fill_value, try_cast=try_cast) return self._combine_series_infer(other, func, level=level, fill_value=fill_value, try_cast=try_cast) def _combine_series_infer(self, other, func, level=None, fill_value=None, try_cast=True): if len(other) == 0: return self * np.nan if len(self) == 0: # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) return self._combine_match_columns(other, func, level=level, fill_value=fill_value, try_cast=try_cast) def _combine_match_index(self, other, func, level=None, fill_value=None, try_cast=True): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported." 
% fill_value) return self._constructor(func(left.values.T, right.values).T, index=left.index, columns=self.columns, copy=False) def _combine_match_columns(self, other, func, level=None, fill_value=None, try_cast=True): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported" % fill_value) new_data = left._data.eval(func=func, other=right, axes=[left.columns, self.index], try_cast=try_cast) return self._constructor(new_data) def _combine_const(self, other, func, errors='raise', try_cast=True): new_data = self._data.eval(func=func, other=other, errors=errors, try_cast=try_cast) return self._constructor(new_data) def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True): import pandas.core.computation.expressions as expressions # unique if self.columns.is_unique: def _compare(a, b): return {col: func(a[col], b[col]) for col in a.columns} new_data = expressions.evaluate(_compare, str_rep, self, other) return self._constructor(data=new_data, index=self.index, columns=self.columns, copy=False) # non-unique else: def _compare(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i, col in enumerate(a.columns)} new_data = expressions.evaluate(_compare, str_rep, self, other) result = self._constructor(data=new_data, index=self.index, copy=False) result.columns = self.columns return result def _compare_frame(self, other, func, str_rep, try_cast=True): if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return self._compare_frame_evaluate(other, func, str_rep, try_cast=try_cast) def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True): if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return self._compare_frame_evaluate(other, func, str_rep, try_cast=try_cast) def combine(self, other, func, fill_value=None, overwrite=True): """ Add two DataFrame objects and do not propagate NaN values, so if for a (column, time) one frame is missing a value, it will default to the other frame's value (which might be NaN as well) Parameters ---------- other : DataFrame func : function Function that takes two series as inputs and return a Series or a scalar fill_value : scalar value overwrite : boolean, default True If True then overwrite values for common keys in the calling frame Returns ------- result : DataFrame Examples -------- >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2) A B 0 0 3 1 0 3 See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: 
series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value # if we have different dtypes, possibily promote new_dtype = this_dtype if not is_dtype_equal(this_dtype, other_dtype): new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) # see if we need to be represented as i8 (datetimelike) # try to keep us at this dtype needs_i8_conversion_i = needs_i8_conversion(new_dtype) if needs_i8_conversion_i: arr = func(series, otherSeries, True) else: arr = func(series, otherSeries) if do_fill: arr = _ensure_float(arr) arr[this_mask & other_mask] = np.nan # try to downcast back to the original dtype if needs_i8_conversion_i: # ToDo: This conversion should be handled in # _maybe_cast_to_datetime but the change affects lot... if is_datetime64tz_dtype(new_dtype): arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz) else: arr = maybe_cast_to_datetime(arr, new_dtype) else: arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns)._convert(datetime=True, copy=False) def combine_first(self, other): """ Combine two DataFrame objects and default to non-null values in frame calling the method. Result index columns will be the union of the respective indexes and columns Parameters ---------- other : DataFrame Returns ------- combined : DataFrame Examples -------- df1's values prioritized, use values from df2 to fill holes: >>> df1 = pd.DataFrame([[1, np.nan]]) >>> df2 = pd.DataFrame([[3, 4]]) >>> df1.combine_first(df2) 0 1 0 1 4.0 See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function """ import pandas.core.computation.expressions as expressions def combiner(x, y, needs_i8_conversion=False): x_values = x.values if hasattr(x, 'values') else x y_values = y.values if hasattr(y, 'values') else y if needs_i8_conversion: mask = isna(x) x_values = x_values.view('i8') y_values = y_values.view('i8') else: mask = isna(x_values) return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) def update(self, other, join='left', overwrite=True, filter_func=None, raise_conflict=False): """ Modify DataFrame in place using non-NA values from passed DataFrame. Aligns on indices Parameters ---------- other : DataFrame, or object coercible into a DataFrame join : {'left'}, default 'left' overwrite : boolean, default True If True then overwrite values for common keys in the calling frame filter_func : callable(1d-array) -> 1d-array<boolean>, default None Can choose to replace values other than NA. Return True for values that should be updated raise_conflict : boolean If True, will raise an error if the DataFrame and other both contain data in the same place. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 
'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If ``other`` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col].values that = other[col].values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if raise_conflict: mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Misc methods def _get_valid_indices(self): is_valid = self.count(1) > 0 return self.index[is_valid] @Appender(_shared_docs['valid_index'] % { 'position': 'first', 'klass': 'DataFrame'}) def first_valid_index(self): if len(self) == 0: return None valid_indices = self._get_valid_indices() return valid_indices[0] if len(valid_indices) else None @Appender(_shared_docs['valid_index'] % { 'position': 'last', 'klass': 'DataFrame'}) def last_valid_index(self): if len(self) == 0: return None valid_indices = self._get_valid_indices() return valid_indices[-1] if len(valid_indices) else None # ---------------------------------------------------------------------- # Data reshaping def pivot(self, index=None, columns=None, values=None): """ Reshape data (produce a "pivot" table) based on column values. Uses unique values from index / columns to form axes of the resulting DataFrame. Parameters ---------- index : string or object, optional Column name to use to make new frame's index. If None, uses existing index. columns : string or object Column name to use to make new frame's columns values : string or object, optional Column name to use for populating new frame's values. 
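# --- Illustrative sketch, not part of the original source: ``update`` aligns
# on the index, works in place, and with ``overwrite=False`` only fills values
# that are missing in the caller. The frame below is a hypothetical example.
import numpy as np
import pandas as pd

upd_df = pd.DataFrame({'A': [1.0, np.nan, 3.0]})
upd_df.update(pd.DataFrame({'A': [10.0, 20.0, 30.0]}), overwrite=False)
# only the missing value is filled: upd_df['A'] is now [1.0, 20.0, 3.0]
# --- end sketch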
If not specified, all remaining columns will be used and the result will have hierarchically indexed columns Returns ------- pivoted : DataFrame See also -------- DataFrame.pivot_table : generalization of pivot that can handle duplicate values for one index/column pair DataFrame.unstack : pivot based on the index values instead of a column Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods Examples -------- >>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'], 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], 'baz': [1, 2, 3, 4, 5, 6]}) >>> df foo bar baz 0 one A 1 1 one B 2 2 one C 3 3 two A 4 4 two B 5 5 two C 6 >>> df.pivot(index='foo', columns='bar', values='baz') A B C one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] A B C one 1 2 3 two 4 5 6 """ from pandas.core.reshape.reshape import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs['pivot_table'] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions fill_value : scalar, default None Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. for subtotal / grand totals) dropna : boolean, default True Do not include columns whose entries are all NaN margins_name : string, default 'All' Name of the row / column that will contain the totals when margins is True. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]}) >>> df A B C D 0 foo one small 1 1 foo one large 2 2 foo one large 2 3 foo two small 3 4 foo two small 3 5 bar one large 4 6 bar one small 5 7 bar two small 6 8 bar two large 7 >>> table = pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 >>> table = pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 
'E': [min, max, np.mean]}) >>> table D E mean max median min A C bar large 5.500000 16 14.5 13 small 5.500000 15 14.5 14 foo large 2.000000 10 9.5 9 small 2.333333 12 11.0 8 Returns ------- table : DataFrame See also -------- DataFrame.pivot : pivot without aggregation that can handle non-numeric data """ @Substitution('') @Appender(_shared_docs['pivot_table']) def pivot_table(self, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All'): from pandas.core.reshape.pivot import pivot_table return pivot_table(self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name) def stack(self, level=-1, dropna=True): """ Pivot a level of the (possibly hierarchical) column labels, returning a DataFrame (or Series in the case of an object with a single level of column labels) having a hierarchical index with a new inner-most level of row labels. The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default last level Level(s) to stack, can pass level name dropna : boolean, default True Whether to drop rows in the resulting Frame/Series with no valid values Examples ---------- >>> s a b one 1. 2. two 3. 4. >>> s.stack() one a 1 b 2 two a 3 b 4 Returns ------- stacked : DataFrame or Series """ from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values .. versionadded:: 0.18.0 See also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 Returns ------- unstacked : DataFrame or Series """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) _shared_docs['melt'] = (""" "Unpivots" a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. %(versionadded)s Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. 
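# --- Illustrative sketch, not part of the original source: ``melt`` and
# ``pivot`` are rough inverses for moving between wide and long ("tidy")
# layouts. Frame and column names below are hypothetical examples.
import pandas as pd

wide = pd.DataFrame({'id': [1, 2], 'x': [10, 20], 'y': [30, 40]})

long = wide.melt(id_vars=['id'], value_vars=['x', 'y'],
                 var_name='measure', value_name='reading')
# long has columns ['id', 'measure', 'reading'] and 4 rows

back = long.pivot(index='id', columns='measure', values='reading')
# back is indexed by 'id' with columns ['x', 'y'] again
# --- end sketch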
value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar Name to use for the 'variable' column. If None it uses ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column. col_level : int or string, optional If columns are a MultiIndex then use this level to melt. See also -------- %(other)s pivot_table DataFrame.pivot Examples -------- >>> import pandas as pd >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)sid_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 3 a C 2 4 b C 4 5 c C 6 The names of 'variable' and 'value' columns can be customized: >>> %(caller)sid_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] >>> df A B C D E F 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3 2 c B E 5 """) @Appender(_shared_docs['melt'] % dict(caller='df.melt(', versionadded='.. versionadded:: 0.20.0\n', other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): from pandas.core.reshape.melt import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) # ---------------------------------------------------------------------- # Time series-related def diff(self, periods=1, axis=0): """ 1st discrete difference of object Parameters ---------- periods : int, default 1 Periods to shift for forming difference axis : {0 or 'index', 1 or 'columns'}, default 0 Take difference over rows (0) or columns (1). .. versionadded:: 0.16.1 Returns ------- diffed : DataFrame """ bm_axis = self._get_block_manager_axis(axis) new_data = self._data.diff(n=periods, axis=bm_axis) return self._constructor(new_data) # ---------------------------------------------------------------------- # Function application def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self # TODO: _shallow_copy(subset)? return self[key] _agg_doc = dedent(""" Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], ... index=pd.date_range('1/1/2000', periods=10)) >>> df.iloc[3:7] = np.nan Aggregate these functions across all columns >>> df.agg(['sum', 'min']) A B C sum -0.182253 -0.614014 -2.909534 min -1.916563 -1.460076 -1.568297 Different aggregations per column >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B max NaN 1.514318 min -1.916563 -1.460076 sum -0.182253 NaN See also -------- pandas.DataFrame.apply pandas.DataFrame.transform pandas.DataFrame.groupby.aggregate pandas.DataFrame.resample.aggregate pandas.DataFrame.rolling.aggregate """) @Appender(_agg_doc) @Appender(_shared_docs['aggregate'] % dict( versionadded='.. 
versionadded:: 0.20.0', **_shared_doc_kwargs)) def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) # TODO: flipped axis result = None if axis == 0: try: result, how = self._aggregate(func, axis=0, *args, **kwargs) except TypeError: pass if result is None: return self.apply(func, axis=axis, args=args, **kwargs) return result agg = aggregate def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): """Applies function along input axis of DataFrame. Objects passed to functions are Series objects having index either the DataFrame's index (axis=0) or the columns (axis=1). Return type depends on whether passed function aggregates, or the reduce argument if the DataFrame is empty. Parameters ---------- func : function Function to apply to each column/row axis : {0 or 'index', 1 or 'columns'}, default 0 * 0 or 'index': apply function to each column * 1 or 'columns': apply function to each row broadcast : boolean, default False For aggregation functions, return object of same size with values propagated raw : boolean, default False If False, convert each row or column into a Series. If raw=True the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance reduce : boolean or None, default None Try to apply reduction procedures. If the DataFrame is empty, apply will use reduce to determine whether the result should be a Series or a DataFrame. If reduce is None (the default), apply's return value will be guessed by calling func an empty Series (note: while guessing, exceptions raised by func will be ignored). If reduce is True a Series will always be returned, and if False a DataFrame will always be returned. args : tuple Positional arguments to pass to function in addition to the array/series Additional keyword arguments will be passed as keywords to the function Notes ----- In the current implementation apply calls func twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df.apply(numpy.sqrt) # returns DataFrame >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0) >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1) See also -------- DataFrame.applymap: For elementwise operations DataFrame.aggregate: only perform aggregating type operations DataFrame.transform: only perform transformating type operations Returns ------- applied : Series or DataFrame """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, args=args, **kwds) return op.get_result() def applymap(self, func): """ Apply a function to a DataFrame that is intended to operate elementwise, i.e. 
like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Examples -------- >>> df = pd.DataFrame(np.random.randn(3, 3)) >>> df 0 1 2 0 -0.029638 1.081563 1.280300 1 0.647747 0.831136 -1.549481 2 0.513416 -0.884417 0.195343 >>> df = df.applymap(lambda x: '%.2f' % x) >>> df 0 1 2 0 -0.03 1.08 1.28 1 0.65 0.83 -1.55 2 0.51 -0.88 0.20 Returns ------- applied : DataFrame See also -------- DataFrame.apply : For operations on rows/columns """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer) # ---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False): """ Append rows of `other` to the end of this frame, returning a new object. Columns not in this frame are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. Returns ------- appended : DataFrame Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. See also -------- pandas.concat : General function to concatenate DataFrame, Series or Panel objects Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) combined_columns = self.columns.tolist() + self.columns.union( other.index).difference(self.columns).tolist() other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.loc[:, self.columns] from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ Join columns with other DataFrame either on index or on a key column. Efficiently Join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series with name field set, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame on : name, tuple/list of names, or array-like Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation how : {'left', 'right', 'outer', 'inner'}, default: 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use other frame's index * outer: form union of calling frame's index (or column if on is specified) with other frame's index, and sort it lexicographically * inner: form intersection of calling frame's index (or column if on is specified) with other frame's index, preserving the order of the calling's one lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns sort : boolean, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword) Notes ----- on, lsuffix, and rsuffix options are not supported when passing a list of DataFrame objects Support for specifying index levels as the `on` parameter was added in version 0.23.0 Examples -------- >>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> caller A key 0 A0 K0 1 A1 K1 2 A2 K2 3 A3 K3 4 A4 K4 5 A5 K5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other B key 0 B0 K0 1 B1 K1 2 B2 K2 Join DataFrames using their indexes. 
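# --- Illustrative sketch, not part of the original source: joining on a key
# column by setting that column as the index of ``other`` and passing ``on=``.
# Frames and column names below are hypothetical examples.
import pandas as pd

left = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'A': ['A0', 'A1', 'A2']})
right = pd.DataFrame({'key': ['K0', 'K2'], 'B': ['B0', 'B2']})

joined = left.join(right.set_index('key'), on='key')
# left join on 'key'; B is NaN for K1. Contains the same data as
# left.merge(right, on='key', how='left'), but keeps the caller's index.
# --- end sketch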
>>> caller.join(other, lsuffix='_caller', rsuffix='_other') >>> A key_caller B key_other 0 A0 K0 B0 K0 1 A1 K1 B1 K1 2 A2 K2 B2 K2 3 A3 K3 NaN NaN 4 A4 K4 NaN NaN 5 A5 K5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both caller and other. The joined DataFrame will have key as its index. >>> caller.set_index('key').join(other.set_index('key')) >>> A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the on parameter. DataFrame.join always uses other's index but we can use any column in the caller. This method preserves the original caller's index in the result. >>> caller.join(other.set_index('key'), on='key') >>> A key B 0 A0 K0 B0 1 A1 K1 B1 2 A2 K2 B2 3 A3 K3 NaN 4 A4 K4 NaN 5 A5 K5 NaN See also -------- DataFrame.merge : For column(s)-on-columns(s) operations Returns ------- joined : DataFrame """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.core.reshape.merge import merge from pandas.core.reshape.concat import concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') # join indexes only using concat if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) if can_concat: return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None): from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate) def round(self, decimals=0, *args, **kwargs): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. Examples -------- >>> df = pd.DataFrame(np.random.random([3, 3]), ... 
columns=['A', 'B', 'C'], index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1 0.17 second 0.0 1 0.58 third 0.9 0 0.49 Returns ------- DataFrame object See Also -------- numpy.around Series.round """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.iteritems(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series): if not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") new_cols = [col for col in _dict_round(self, decimals)] elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.iteritems()] else: raise TypeError("decimals must be an integer, a dict-like or a " "Series") if len(new_cols) > 0: return self._constructor(concat(new_cols, axis=1), index=self.index, columns=self.columns) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method='pearson', min_periods=1): """ Compute pairwise correlation of columns, excluding NA/null values Parameters ---------- method : {'pearson', 'kendall', 'spearman'} * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for pearson and spearman correlation Returns ------- y : DataFrame """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if method == 'pearson': correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = libalgos.nancorr_spearman(_ensure_float64(mat), minp=min_periods) else: if min_periods is None: min_periods = 1 mat = _ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1. elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c return self._constructor(correl, index=idx, columns=cols) def cov(self, min_periods=None): """ Compute pairwise covariance of columns, excluding NA/null values Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- y : DataFrame Notes ----- `y` contains the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1 (unbiased estimator). 
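# --- Illustrative sketch, not part of the original source: pairwise
# correlation/covariance and the dict form of ``round``. Data below is a
# hypothetical example.
import pandas as pd

stat_df = pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0],
                        'y': [2.0, 4.0, 6.0, 8.0]})

stat_df.corr(method='pearson')   # x and y are perfectly correlated (1.0)
stat_df.cov()                    # covariance, normalized by N - 1
stat_df.round({'x': 0})          # rounds only column 'x'; 'y' is left as is
# --- end sketch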
""" numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if notna(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) def corrwith(self, other, axis=0, drop=False): """ Compute pairwise correlation between rows or columns of two DataFrame objects. Parameters ---------- other : DataFrame, Series axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise drop : boolean, default False Drop missing indices from result, default returns union of all Returns ------- correls : Series """ axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(other.corr, axis=axis) other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) # mask missing values left = left + right * 0 right = right + left * 0 if axis == 1: left = left.T right = right.T # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom if not drop: raxis = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) correl = correl.reindex(result_index) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Return Series with number of non-NA/null observations over requested axis. Works with non-floating point data as well (detects NaN and None) Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a DataFrame numeric_only : boolean, default False Include only float, int, boolean data Returns ------- count : Series (or DataFrame if level specified) """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type: result = notna(frame).sum(axis=axis) else: counts = notna(frame.values).sum(axis=axis) result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical %s." 
% self._get_axis_name(axis)) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notna(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, compat.string_types): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] labels = _ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): axis = self._get_axis_number(axis) def f(x): return op(x, axis=axis, skipna=skipna, **kwds) labels = self._get_agg_axis(axis) # exclude timedelta/datetime unless we are uniform types if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type: numeric_only = True if numeric_only is None: try: values = self.values result = f(values) except Exception as e: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series # we only end up here if we have not specified # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can result = self.apply(f, reduce=False, ignore_failures=True) if result.ndim == self.ndim: result = result.iloc[0] return result except: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError("Handling exception with filter_" "type %s not implemented." % filter_type) raise_with_traceback(e) with np.errstate(all='ignore'): result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover msg = ("Generating numeric_only data with filter_type %s" "not supported." % filter_type) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if hasattr(result, 'dtype') and is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notna(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = coerce_to_dtypes(result, self.dtypes) return Series(result, index=labels) def nunique(self, axis=0, dropna=True): """ Return Series with number of distinct observations over requested axis. .. versionadded:: 0.20.0 Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 dropna : boolean, default True Don't include NaN in the counts. 
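# --- Illustrative sketch, not part of the original source: ``count`` tallies
# non-NA cells per column while ``nunique`` tallies distinct values, with NaN
# excluded unless ``dropna=False``. Data below is a hypothetical example.
import numpy as np
import pandas as pd

cnt_df = pd.DataFrame({'A': [1, 1, np.nan], 'B': [1, 2, 3]})

cnt_df.count()                 # A: 2, B: 3  (NaN never counted)
cnt_df.nunique()               # A: 1, B: 3  (NaN excluded by default)
cnt_df.nunique(dropna=False)   # A: 2, B: 3  (NaN counted as its own value)
# --- end sketch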
Returns ------- nunique : Series Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]}) >>> df.nunique() A 3 B 1 >>> df.nunique(axis=1) 0 1 1 2 2 2 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Raises ------ ValueError * If the row/column is empty Returns ------- idxmin : Series Notes ----- This method is the DataFrame version of ``ndarray.argmin``. See Also -------- Series.idxmin """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Raises ------ ValueError * If the row/column is empty Returns ------- idxmax : Series Notes ----- This method is the DataFrame version of ``ndarray.argmax``. See Also -------- Series.idxmax """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): """ let's be explicit about this """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False): """ Gets the mode(s) of each element along the axis selected. Adds a row for each mode per label, fills in gaps with nan. Note that there could be multiple values returned for the selected axis (when more than one item share the maximum frequency), which is the reason why a dataframe is returned. If you want to impute missing values with the mode in a dataframe ``df``, you can just do this: ``df.fillna(df.mode().iloc[0])`` Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row numeric_only : boolean, default False if True, only apply to numeric columns Returns ------- modes : DataFrame (sorted) Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]}) >>> df.mode() A 0 1 1 2 """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode() return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'): """ Return values at the given quantile over requested axis, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute axis : {0, 1, 'index', 'columns'} (default 0) 0 or 'index' for row-wise, 1 or 'columns' for column-wise interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. 
versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- quantiles : Series or DataFrame - If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. - If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 """ self._check_percentile(q) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 if is_transposed: data = data.T result = data._data.quantile(qs=q, axis=1, interpolation=interpolation, transposed=is_transposed) if result.ndim == 2: result = self._constructor(result) else: result = self._constructor_sliced(result, name=q) if is_transposed: result = result.T return result def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period Parameters ---------- freq : string, default frequency of PeriodIndex Desired frequency how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default) copy : boolean, default True If false then underlying input data is not copied Returns ------- df : DataFrame with DatetimeIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed) Parameters ---------- freq : string, default axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default) copy : boolean, default True If False then underlying input data is not copied Returns ------- ts : TimeSeries with PeriodIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) return self._constructor(new_data) def isin(self, values): """ Return boolean DataFrame showing whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dictionary The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dictionary, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. 
Returns ------- DataFrame of booleans Examples -------- When ``values`` is a list: >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> df.isin([1, 3, 12, 'a']) A B 0 True True 1 False False 2 True False When ``values`` is a dict: >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]}) >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]}) A B 0 True False # Note that B didn't match the 1 here. 1 False True 2 True True When ``values`` is a Series or DataFrame: >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']}) >>> df.isin(other) A B 0 True False 1 False False # Column A in `other` has a 3, but not at index 1. 2 True True """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are " "allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns) # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = accessor.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods) hist = gfx.hist_frame boxplot = gfx.boxplot_frame DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}) DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
""" # figure out the index, if necessary if index is None: index = extract_index(arrays) else: index = _ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] return create_block_manager_from_arrays(arrays, arr_names, axes) def extract_index(data): from pandas.core.index import _union_indexes index = None if len(data) == 0: index = Index([]) elif len(data) > 0: raw_lengths = [] indexes = [] have_raw_arrays = False have_series = False have_dicts = False for v in data: if isinstance(v, Series): have_series = True indexes.append(v.index) elif isinstance(v, dict): have_dicts = True indexes.append(list(v.keys())) elif is_list_like(v) and getattr(v, 'ndim', 1) == 1: have_raw_arrays = True raw_lengths.append(len(v)) if not indexes and not raw_lengths: raise ValueError('If using all scalar values, you must pass' ' an index') if have_series or have_dicts: index = _union_indexes(indexes) if have_raw_arrays: lengths = list(set(raw_lengths)) if len(lengths) > 1: raise ValueError('arrays must all be same length') if have_dicts: raise ValueError('Mixing dicts with non-Series may lead to ' 'ambiguous ordering.') if have_series: if lengths[0] != len(index): msg = ('array length %d does not match index length %d' % (lengths[0], len(index))) raise ValueError(msg) else: index = _default_index(lengths[0]) return _ensure_index(index) def _prep_ndarray(values, copy=True): if not isinstance(values, (np.ndarray, Series, Index)): if len(values) == 0: return np.empty((0, 0), dtype=object) def convert(v): return maybe_convert_platform(v) # we could have a 1-dim or 2-dim list here # this is equiv of np.asarray, but does object conversion # and platform dtype preservation try: if is_list_like(values[0]) or hasattr(values[0], 'len'): values = np.array([convert(v) for v in values]) else: values = convert(values) except: values = convert(values) else: # drop subclass info, do not copy data values = np.asarray(values) if copy: values = values.copy() if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: raise ValueError('Must pass 2-d input') return values def _to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns """ if isinstance(data, DataFrame): if columns is not None: arrays = [data._ixs(i, axis=1).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], collections.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Series): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = _default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return 
_list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ extract from a masked rec array and create the manager """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = _get_names_from_index(fdata) if index is None: index = _default_index(len(data)) index = _ensure_index(index) if columns is not None: columns = _ensure_index(columns) arrays, arr_columns = _to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = _arrays_to_mgr(arrays, arr_columns, index, columns) if copy: mgr = mgr.copy() return mgr def _reorder_arrays(arrays, arr_columns, columns): # reorder according to the columns if (columns is not None and len(columns) and arr_columns is not None and len(arr_columns)): indexer = _ensure_index(arr_columns).get_indexer(columns) arr_columns = _ensure_index([arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns def _list_to_arrays(data, columns, coerce_float=False, dtype=None): if len(data) > 0 and isinstance(data[0], tuple): content = list(lib.to_object_array_tuples(data).T) else: # list of lists content = list(lib.to_object_array(data).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): from pandas.core.index import _get_objs_combined_axis if columns is None: columns = _get_objs_combined_axis(data) indexer_cache = {} aligned_values = [] for s in data: index = getattr(s, 'index', None) if index is None: index = _default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) values = _values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) if values.dtype == np.object_: content = list(values.T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) else: return values.T, columns def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): if columns is None: gen = (list(x.keys()) for x in data) sort = not any(isinstance(d, OrderedDict) for d in data) columns = lib.fast_unique_multiple_list_gen(gen, sort=sort) # assure that they are of the base dict class and not of derived # classes data = [(type(d) is dict) and d or dict(d) for d in data] content = list(lib.dicts_to_array(data, list(columns)).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: columns = _default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
raise AssertionError('%d columns passed, passed data had %s ' 'columns' % (len(columns), len(content))) # provide soft conversion of object dtypes def convert(arr): if dtype != object and dtype != np.object: arr = lib.maybe_convert_objects(arr, try_float=coerce_float) arr = maybe_cast_to_datetime(arr, dtype) return arr arrays = [convert(arr) for arr in content] return arrays, columns def _get_names_from_index(data): has_some_name = any(getattr(s, 'name', None) is not None for s in data) if not has_some_name: return _default_index(len(data)) index = lrange(len(data)) count = 0 for i, s in enumerate(data): n = getattr(s, 'name', None) if n is not None: index[i] = n else: index[i] = 'Unnamed %d' % count count += 1 return index def _homogenize(data, index, dtype=None): from pandas.core.series import _sanitize_array oindex = None homogenized = [] for v in data: if isinstance(v, Series): if dtype is not None: v = v.astype(dtype) if v.index is not index: # Forces alignment. No need to copy data since we # are putting it into an ndarray later v = v.reindex(index, copy=False) else: if isinstance(v, dict): if oindex is None: oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): v = _dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) v = _sanitize_array(v, index, dtype=dtype, copy=False, raise_cast_failure=False) homogenized.append(v) return homogenized def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in compat.iteritems(data): for col, v in compat.iteritems(s): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return ('%s' % s)[:space].ljust(space)
bsd-3-clause
nesterione/scikit-learn
benchmarks/bench_lasso.py
295
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
KECB/learn
machine_learning/NN_code_release/handle_other_algo_data.py
1
2605
import pickle
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn import metrics
import sys


def gen_SNNCliq_data_file(fileName, outFileName):
    with open(fileName, 'rb') as handle:
        output_dict = pickle.load(handle)
    #transform_data=output_dict['test_X_TPMgn0']
    transform_data = output_dict['test_X']
    valid_Y = output_dict['test_Y_array']
    data = np.transpose(transform_data)
    nrow = data.shape[0]
    ncol = data.shape[1]
    #outC=open(outFileName+'_cell.txt','w')
    #outC.write('Cell\tSample\n')
    outF = open(outFileName, 'w')
    outF.write('Sample')
    for i in range(ncol):
        outF.write('\tS' + str(i))
        #outC.write('S'+str(i)+'\t'+str(i)+'\n')
    outF.write('\n')
    for i in range(nrow):
        out = map(str, data[i])
        outF.write('Gene' + str(i) + '\t' + '\t'.join(out) + '\n')
    outA = open(outFileName + '_Answer.txt', 'w')
    outA.write('\t'.join(map(str, valid_Y)) + "\n")


def gen_SNNCliq_ARI_file(SNNIDXfile, answerfile, outFileName, dataFileName=None):
    SNN_lines = open(SNNIDXfile).readlines()
    SNNs = []
    for line in SNN_lines:
        SNNs.append(line.replace('\n', ''))
    print SNNs
    print len(SNNs)
    answer = open(answerfile).readlines()[0].replace('\n', '').split('\t')
    print answer
    print len(answer)
    #ARI= adjusted_rand_score(SNNs,answer)
    if dataFileName:
        with open(dataFileName, 'rb') as handle:
            output_dict = pickle.load(handle)
        #transform_data=output_dict['test_X_TPMgn0']
        data = output_dict['test_X']
    score = {}
    score['homogeneity_score'] = metrics.homogeneity_score(answer, SNNs)
    score['completeness_score'] = metrics.completeness_score(answer, SNNs)
    score['v_measure_score'] = metrics.v_measure_score(answer, SNNs)
    score['adjusted_rand_score'] = metrics.adjusted_rand_score(answer, SNNs)
    score['adjusted_mutual_info_score'] = metrics.adjusted_mutual_info_score(answer, SNNs)
    score['silhouette_score'] = metrics.silhouette_score(data, SNNs)
    score['fowlkes_mallows_score'] = metrics.fowlkes_mallows_score(answer, SNNs)
    #print ARI
    print score
    outfile = open(outFileName, 'w')
    for key, val in score.items():
        outfile.write(key + '\t' + str(val) + '\n')


if __name__ == '__main__':
    print sys.argv
    if sys.argv[1] == '1':
        #gen_SNNCliq_data_file('seed-1_vct8_boot0_1.pickle','test.output')
        gen_SNNCliq_data_file(sys.argv[2], sys.argv[3])
    else:
        #gen_SNNCliq_ARI_file('IDX.txt','test.output_Answer.txt','ARI.txt')
        gen_SNNCliq_ARI_file(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
mit
waynenilsen/statsmodels
statsmodels/discrete/tests/test_discrete.py
19
55886
""" Tests for discrete models Notes ----- DECIMAL_3 is used because it seems that there is a loss of precision in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson tests. """ # pylint: disable-msg=E1101 from statsmodels.compat.python import range import os import numpy as np from numpy.testing import (assert_, assert_raises, assert_almost_equal, assert_equal, assert_array_equal, assert_allclose, assert_array_less) from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit, Poisson, NegativeBinomial) from statsmodels.discrete.discrete_margins import _iscount, _isdummy import statsmodels.api as sm import statsmodels.formula.api as smf from nose import SkipTest from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes from statsmodels.tools.sm_exceptions import PerfectSeparationError try: import cvxopt has_cvxopt = True except ImportError: has_cvxopt = False try: from scipy.optimize import basinhopping has_basinhopping = True except ImportError: has_basinhopping = False DECIMAL_14 = 14 DECIMAL_10 = 10 DECIMAL_9 = 9 DECIMAL_4 = 4 DECIMAL_3 = 3 DECIMAL_2 = 2 DECIMAL_1 = 1 DECIMAL_0 = 0 class CheckModelResults(object): """ res2 should be the test results from RModelWrap or the results as defined in model_results_data """ def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_conf_int(self): assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5) def test_zstat(self): assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4) def pvalues(self): assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4) # def test_cov_params(self): # assert_almost_equal(self.res1.cov_params(), self.res2.cov_params, # DECIMAL_4) def test_llf(self): assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4) def test_llnull(self): assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4) def test_llr(self): assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3) def test_llr_pvalue(self): assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue, DECIMAL_4) def test_normalized_cov_params(self): pass def test_bse(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4) def test_dof(self): assert_equal(self.res1.df_model, self.res2.df_model) assert_equal(self.res1.df_resid, self.res2.df_resid) def test_aic(self): assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3) def test_bic(self): assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3) def test_predict(self): assert_almost_equal(self.res1.model.predict(self.res1.params), self.res2.phat, DECIMAL_4) def test_predict_xb(self): assert_almost_equal(self.res1.model.predict(self.res1.params, linear=True), self.res2.yhat, DECIMAL_4) def test_loglikeobs(self): #basic cross check llobssum = self.res1.model.loglikeobs(self.res1.params).sum() assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14) def test_jac(self): #basic cross check jacsum = self.res1.model.score_obs(self.res1.params).sum(0) score = self.res1.model.score(self.res1.params) assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ? 
class CheckBinaryResults(CheckModelResults): def test_pred_table(self): assert_array_equal(self.res1.pred_table(), self.res2.pred_table) def test_resid_dev(self): assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev, DECIMAL_4) def test_resid_generalized(self): assert_almost_equal(self.res1.resid_generalized, self.res2.resid_generalized, DECIMAL_4) def smoke_test_resid_response(self): self.res1.resid_response class CheckMargEff(object): """ Test marginal effects (margeff) and its options """ def test_nodummy_dydxoverall(self): me = self.res1.get_margeff() assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dydx, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dydx_se, DECIMAL_4) def test_nodummy_dydxmean(self): me = self.res1.get_margeff(at='mean') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4) def test_nodummy_dydxmedian(self): me = self.res1.get_margeff(at='median') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dydxmedian, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4) def test_nodummy_dydxzero(self): me = self.res1.get_margeff(at='zero') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dydxzero, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dydxzero, DECIMAL_4) def test_nodummy_dyexoverall(self): me = self.res1.get_margeff(method='dyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dyex, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dyex_se, DECIMAL_4) def test_nodummy_dyexmean(self): me = self.res1.get_margeff(at='mean', method='dyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dyexmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4) def test_nodummy_dyexmedian(self): me = self.res1.get_margeff(at='median', method='dyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dyexmedian, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4) def test_nodummy_dyexzero(self): me = self.res1.get_margeff(at='zero', method='dyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_dyexzero, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4) def test_nodummy_eydxoverall(self): me = self.res1.get_margeff(method='eydx') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eydx, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eydx_se, DECIMAL_4) def test_nodummy_eydxmean(self): me = self.res1.get_margeff(at='mean', method='eydx') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4) def test_nodummy_eydxmedian(self): me = self.res1.get_margeff(at='median', method='eydx') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eydxmedian, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4) def test_nodummy_eydxzero(self): me = self.res1.get_margeff(at='zero', method='eydx') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eydxzero, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4) def test_nodummy_eyexoverall(self): me = self.res1.get_margeff(method='eyex') assert_almost_equal(me.margeff, 
self.res2.margeff_nodummy_eyex, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eyex_se, DECIMAL_4) def test_nodummy_eyexmean(self): me = self.res1.get_margeff(at='mean', method='eyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eyexmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4) def test_nodummy_eyexmedian(self): me = self.res1.get_margeff(at='median', method='eyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eyexmedian, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4) def test_nodummy_eyexzero(self): me = self.res1.get_margeff(at='zero', method='eyex') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_eyexzero, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4) def test_dummy_dydxoverall(self): me = self.res1.get_margeff(dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_dydx, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_dydx_se, DECIMAL_4) def test_dummy_dydxmean(self): me = self.res1.get_margeff(at='mean', dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_dydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_dydxmean_se, DECIMAL_4) def test_dummy_eydxoverall(self): me = self.res1.get_margeff(method='eydx', dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_eydx, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_eydx_se, DECIMAL_4) def test_dummy_eydxmean(self): me = self.res1.get_margeff(at='mean', method='eydx', dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_eydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_eydxmean_se, DECIMAL_4) def test_count_dydxoverall(self): me = self.res1.get_margeff(count=True) assert_almost_equal(me.margeff, self.res2.margeff_count_dydx, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_count_dydx_se, DECIMAL_4) def test_count_dydxmean(self): me = self.res1.get_margeff(count=True, at='mean') assert_almost_equal(me.margeff, self.res2.margeff_count_dydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_count_dydxmean_se, DECIMAL_4) def test_count_dummy_dydxoverall(self): me = self.res1.get_margeff(count=True, dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4) def test_count_dummy_dydxmean(self): me = self.res1.get_margeff(count=True, dummy=True, at='mean') assert_almost_equal(me.margeff, self.res2.margeff_count_dummy_dydxmean, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4) class TestProbitNewton(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0) res2 = Spector() res2.probit() cls.res2 = res2 #def test_predict(self): # assert_almost_equal(self.res1.model.predict(self.res1.params), # self.res2.predict, DECIMAL_4) class TestProbitBFGS(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs", disp=0) res2 = Spector() res2.probit() cls.res2 = res2 class 
TestProbitNM(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.probit() cls.res2 = res2 cls.res1 = Probit(data.endog, data.exog).fit(method="nm", disp=0, maxiter=500) class TestProbitPowell(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.probit() cls.res2 = res2 cls.res1 = Probit(data.endog, data.exog).fit(method="powell", disp=0, ftol=1e-8) class TestProbitCG(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.probit() cls.res2 = res2 # fmin_cg fails to converge on some machines - reparameterize from statsmodels.tools.transform_model import StandardizeTransform transf = StandardizeTransform(data.exog) exog_st = transf(data.exog) res1_st = Probit(data.endog, exog_st).fit(method="cg", disp=0, maxiter=1000, gtol=1e-08) start_params = transf.transform_params(res1_st.params) assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6) cls.res1 = Probit(data.endog, data.exog).fit(start_params=start_params, method="cg", maxiter=1000, gtol=1e-05, disp=0) assert_array_less(cls.res1.mle_retvals['fcalls'], 100) class TestProbitNCG(CheckBinaryResults): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.probit() cls.res2 = res2 cls.res1 = Probit(data.endog, data.exog).fit(method="ncg", disp=0, avextol=1e-8, warn_convergence=False) # converges close enough but warnflag is 2 for precision loss class TestProbitBasinhopping(CheckBinaryResults): @classmethod def setupClass(cls): if not has_basinhopping: raise SkipTest("Skipped TestProbitBasinhopping since" " basinhopping solver is not available") data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.probit() cls.res2 = res2 fit = Probit(data.endog, data.exog).fit cls.res1 = fit(method="basinhopping", disp=0, niter=5, minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8}) class CheckLikelihoodModelL1(object): """ For testing results generated with L1 regularization """ def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_conf_int(self): assert_almost_equal( self.res1.conf_int(), self.res2.conf_int, DECIMAL_4) def test_bse(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4) def test_nnz_params(self): assert_almost_equal( self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4) def test_aic(self): assert_almost_equal( self.res1.aic, self.res2.aic, DECIMAL_3) def test_bic(self): assert_almost_equal( self.res1.bic, self.res2.bic, DECIMAL_3) class TestProbitL1(CheckLikelihoodModelL1): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0] cls.res1 = Probit(data.endog, data.exog).fit_regularized( method="l1", alpha=alpha, disp=0, trim_mode='auto', auto_trim_tol=0.02, acc=1e-10, maxiter=1000) res2 = DiscreteL1() res2.probit() cls.res2 = res2 def test_cov_params(self): assert_almost_equal( self.res1.cov_params(), self.res2.cov_params, DECIMAL_4) class TestMNLogitL1(CheckLikelihoodModelL1): @classmethod def setupClass(cls): anes_data = sm.datasets.anes96.load() anes_exog = anes_data.exog anes_exog = 
sm.add_constant(anes_exog, prepend=False) mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog) alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0] alpha[-1,:] = 0 cls.res1 = mlogit_mod.fit_regularized( method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02, acc=1e-10, disp=0) res2 = DiscreteL1() res2.mnlogit() cls.res2 = res2 class TestLogitL1(CheckLikelihoodModelL1): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0] cls.res1 = Logit(data.endog, data.exog).fit_regularized( method="l1", alpha=cls.alpha, disp=0, trim_mode='size', size_trim_tol=1e-5, acc=1e-10, maxiter=1000) res2 = DiscreteL1() res2.logit() cls.res2 = res2 def test_cov_params(self): assert_almost_equal( self.res1.cov_params(), self.res2.cov_params, DECIMAL_4) class TestCVXOPT(object): @classmethod def setupClass(self): self.data = sm.datasets.spector.load() self.data.exog = sm.add_constant(self.data.exog, prepend=True) def test_cvxopt_versus_slsqp(self): #Compares resutls from cvxopt to the standard slsqp if has_cvxopt: self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0] res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized( method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000, trim_mode='auto') res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized( method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10, trim_mode='auto', auto_trim_tol=0.01, maxiter=1000) assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4) else: raise SkipTest("Skipped test_cvxopt since cvxopt is not available") class TestSweepAlphaL1(object): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) cls.model = Logit(data.endog, data.exog) cls.alphas = np.array( [[0.1, 0.1, 0.1, 0.1], [0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]]) #/ data.exog.shape[0] cls.res1 = DiscreteL1() cls.res1.sweep() def test_sweep_alpha(self): for i in range(3): alpha = self.alphas[i, :] res2 = self.model.fit_regularized( method="l1", alpha=alpha, disp=0, acc=1e-10, trim_mode='off', maxiter=1000) assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4) class CheckL1Compatability(object): """ Tests compatability between l1 and unregularized by setting alpha such that certain parameters should be effectively unregularized, and others should be ignored by the model. 
""" def test_params(self): m = self.m assert_almost_equal( self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4) # The last entry should be close to zero # handle extra parameter of NegativeBinomial kvars = self.res_reg.model.exog.shape[1] assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4) def test_cov_params(self): m = self.m # The restricted cov_params should be equal assert_almost_equal( self.res_unreg.cov_params()[:m, :m], self.res_reg.cov_params()[:m, :m], DECIMAL_1) def test_df(self): assert_equal(self.res_unreg.df_model, self.res_reg.df_model) assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid) def test_t_test(self): m = self.m kvars = self.kvars # handle extra parameter of NegativeBinomial extra = getattr(self, 'k_extra', 0) t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params))) t_reg = self.res_reg.t_test(np.eye(kvars + extra)) assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3) assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3) assert_almost_equal(np.nan, t_reg.sd[m]) assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3) assert_almost_equal(np.nan, t_reg.tvalue[m]) def test_f_test(self): m = self.m kvars = self.kvars # handle extra parameter of NegativeBinomial extra = getattr(self, 'k_extra', 0) f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m]) f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m]) assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3) assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3) def test_bad_r_matrix(self): kvars = self.kvars assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) ) class TestPoissonL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 10 # Number of variables cls.m = 7 # Number of unregularized parameters rand_data = sm.datasets.randhie.load() rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1) rand_exog = sm.add_constant(rand_exog, prepend=True) # Drop some columns and do an unregularized fit exog_no_PSI = rand_exog[:, :cls.m] mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI) cls.res_unreg = mod_unreg.fit(method="newton", disp=False) # Do a regularized fit with alpha, effectively dropping the last column alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars) alpha[:cls.m] = 0 cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized( method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000, trim_mode='auto') class TestNegativeBinomialL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 10 # Number of variables cls.m = 7 # Number of unregularized parameters rand_data = sm.datasets.randhie.load() rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1) rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0) rand_exog = sm.add_constant(rand_exog_st, prepend=True) # Drop some columns and do an unregularized fit exog_no_PSI = rand_exog[:, :cls.m] mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI) cls.res_unreg = mod_unreg.fit(method="newton", disp=False) # Do a regularized fit with alpha, effectively dropping the last column alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1) alpha[:cls.m] = 0 alpha[-1] = 0 # don't penalize alpha mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog) cls.res_reg = mod_reg.fit_regularized( method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000, trim_mode='auto') cls.k_extra = 1 # 1 extra parameter in nb2 class 
TestNegativeBinomialGeoL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 10 # Number of variables cls.m = 7 # Number of unregularized parameters rand_data = sm.datasets.randhie.load() rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1) rand_exog = sm.add_constant(rand_exog, prepend=True) # Drop some columns and do an unregularized fit exog_no_PSI = rand_exog[:, :cls.m] mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI, loglike_method='geometric') cls.res_unreg = mod_unreg.fit(method="newton", disp=False) # Do a regularized fit with alpha, effectively dropping the last columns alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars) alpha[:cls.m] = 0 mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog, loglike_method='geometric') cls.res_reg = mod_reg.fit_regularized( method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000, trim_mode='auto') assert_equal(mod_reg.loglike_method, 'geometric') class TestLogitL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 4 # Number of variables cls.m = 3 # Number of unregularized parameters data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) # Do a regularized fit with alpha, effectively dropping the last column alpha = np.array([0, 0, 0, 10]) cls.res_reg = Logit(data.endog, data.exog).fit_regularized( method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000, trim_mode='auto') # Actually drop the last columnand do an unregularized fit exog_no_PSI = data.exog[:, :cls.m] cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15) class TestMNLogitL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 4 # Number of variables cls.m = 3 # Number of unregularized parameters data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) alpha = np.array([0, 0, 0, 10]) cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized( method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000, trim_mode='auto') # Actually drop the last columnand do an unregularized fit exog_no_PSI = data.exog[:, :cls.m] cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit( disp=0, tol=1e-15, method='bfgs', maxiter=1000) def test_t_test(self): m = self.m kvars = self.kvars t_unreg = self.res_unreg.t_test(np.eye(m)) t_reg = self.res_reg.t_test(np.eye(kvars)) assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3) assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3) assert_almost_equal(np.nan, t_reg.sd[m]) assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3) def test_f_test(self): raise SkipTest("Skipped test_f_test for MNLogit") class TestProbitL1Compatability(CheckL1Compatability): @classmethod def setupClass(cls): cls.kvars = 4 # Number of variables cls.m = 3 # Number of unregularized parameters data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) alpha = np.array([0, 0, 0, 10]) cls.res_reg = Probit(data.endog, data.exog).fit_regularized( method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000, trim_mode='auto') # Actually drop the last columnand do an unregularized fit exog_no_PSI = data.exog[:, :cls.m] cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15) class CompareL1(object): """ For checking results for l1 regularization. Assumes self.res1 and self.res2 are two legitimate models to be compared. 
""" def test_basic_results(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4) assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4) assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4) assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4) assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4) assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4) assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4) assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4) assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4) class CompareL11D(CompareL1): """ Check t and f tests. This only works for 1-d results """ def test_tests(self): restrictmat = np.eye(len(self.res1.params.ravel())) assert_almost_equal(self.res1.t_test(restrictmat).pvalue, self.res2.t_test(restrictmat).pvalue, DECIMAL_4) assert_almost_equal(self.res1.f_test(restrictmat).pvalue, self.res2.f_test(restrictmat).pvalue, DECIMAL_4) class TestL1AlphaZeroLogit(CompareL11D): """ Compares l1 model with alpha = 0 to the unregularized model. """ @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) cls.res1 = Logit(data.endog, data.exog).fit_regularized( method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000, trim_mode='auto', auto_trim_tol=0.01) cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15) class TestL1AlphaZeroProbit(CompareL11D): """ Compares l1 model with alpha = 0 to the unregularized model. """ @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=True) cls.res1 = Probit(data.endog, data.exog).fit_regularized( method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000, trim_mode='auto', auto_trim_tol=0.01) cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15) class TestL1AlphaZeroMNLogit(CompareL1): @classmethod def setupClass(cls): data = sm.datasets.anes96.load() data.exog = sm.add_constant(data.exog, prepend=False) cls.res1 = MNLogit(data.endog, data.exog).fit_regularized( method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000, trim_mode='auto', auto_trim_tol=0.01) cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15, method='bfgs', maxiter=1000) class TestLogitNewton(CheckBinaryResults, CheckMargEff): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0) res2 = Spector() res2.logit() cls.res2 = res2 def test_resid_pearson(self): assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson, 5) def test_nodummy_exog1(self): me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}) assert_almost_equal(me.margeff, self.res2.margeff_nodummy_atexog1, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_atexog1_se, DECIMAL_4) def test_nodummy_exog2(self): me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean') assert_almost_equal(me.margeff, self.res2.margeff_nodummy_atexog2, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_atexog2_se, DECIMAL_4) def test_dummy_exog1(self): me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_atexog1, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_atexog1_se, DECIMAL_4) def 
test_dummy_exog2(self): me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean', dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_atexog2, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_atexog2_se, DECIMAL_4) class TestLogitBFGS(CheckBinaryResults, CheckMargEff): @classmethod def setupClass(cls): data = sm.datasets.spector.load() data.exog = sm.add_constant(data.exog, prepend=False) res2 = Spector() res2.logit() cls.res2 = res2 cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0) class TestPoissonNewton(CheckModelResults): @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0) res2 = RandHIE() res2.poisson() cls.res2 = res2 def test_margeff_overall(self): me = self.res1.get_margeff() assert_almost_equal(me.margeff, self.res2.margeff_nodummy_overall, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_nodummy_overall_se, DECIMAL_4) def test_margeff_dummy_overall(self): me = self.res1.get_margeff(dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dummy_overall, DECIMAL_4) assert_almost_equal(me.margeff_se, self.res2.margeff_dummy_overall_se, DECIMAL_4) def test_resid(self): assert_almost_equal(self.res1.resid, self.res2.resid, 2) def test_predict_prob(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) probs_res = np.loadtxt(os.path.join(cur_dir, "results", "predict_prob_poisson.csv"), delimiter=",") # just check the first 100 obs. vs R to save memory probs = self.res1.predict_prob()[:100] assert_almost_equal(probs, probs_res, 8) class TestNegativeBinomialNB2Newton(CheckModelResults): @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0) res2 = RandHIE() res2.negativebinomial_nb2_bfgs() cls.res2 = res2 def test_jac(self): pass #NOTE: The bse is much closer precitions to stata def test_bse(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_alpha(self): self.res1.bse # attaches alpha_std_err assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, DECIMAL_4) assert_almost_equal(self.res1.lnalpha_std_err, self.res2.lnalpha_std_err, DECIMAL_4) def test_conf_int(self): assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3) def test_zstat(self): # Low precision because Z vs. 
t assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues, DECIMAL_2) def test_fittedvalues(self): assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3) def test_predict(self): assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3) def test_predict_xb(self): assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3) def no_info(self): pass test_jac = no_info class TestNegativeBinomialNB1Newton(CheckModelResults): @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit( method="newton", maxiter=100, disp=0) res2 = RandHIE() res2.negativebinomial_nb1_bfgs() cls.res2 = res2 def test_zstat(self): assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1) def test_lnalpha(self): self.res1.bse # attaches alpha_std_err assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3) assert_almost_equal(self.res1.lnalpha_std_err, self.res2.lnalpha_std_err, DECIMAL_4) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_conf_int(self): # the bse for alpha is not high precision from the hessian # approximation assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_2) def test_jac(self): pass def test_predict(self): pass def test_predict_xb(self): pass class TestNegativeBinomialNB2BFGS(CheckModelResults): @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit( method='bfgs', disp=0, maxiter=1000) res2 = RandHIE() res2.negativebinomial_nb2_bfgs() cls.res2 = res2 def test_jac(self): pass #NOTE: The bse is much closer precitions to stata def test_bse(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_alpha(self): self.res1.bse # attaches alpha_std_err assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, DECIMAL_4) assert_almost_equal(self.res1.lnalpha_std_err, self.res2.lnalpha_std_err, DECIMAL_4) def test_conf_int(self): assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3) def test_zstat(self): # Low precision because Z vs. 
t assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues, DECIMAL_2) def test_fittedvalues(self): assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3) def test_predict(self): assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3) def test_predict_xb(self): assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3) def no_info(self): pass test_jac = no_info class TestNegativeBinomialNB1BFGS(CheckModelResults): @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs", maxiter=100, disp=0) res2 = RandHIE() res2.negativebinomial_nb1_bfgs() cls.res2 = res2 def test_zstat(self): assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1) def test_lnalpha(self): self.res1.bse # attaches alpha_std_err assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3) assert_almost_equal(self.res1.lnalpha_std_err, self.res2.lnalpha_std_err, DECIMAL_4) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4) def test_conf_int(self): # the bse for alpha is not high precision from the hessian # approximation assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_2) def test_jac(self): pass def test_predict(self): pass def test_predict_xb(self): pass class TestNegativeBinomialGeometricBFGS(CheckModelResults): """ Cannot find another implementation of the geometric to cross-check results we only test fitted values because geometric has fewer parameters than nb1 and nb2 and we want to make sure that predict() np.dot(exog, params) works """ @classmethod def setupClass(cls): data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=False) cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0) res2 = RandHIE() res2.negativebinomial_geometric_bfgs() cls.res2 = res2 # the following are regression tests, could be inherited instead def test_aic(self): assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3) def test_bic(self): assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3) def test_conf_int(self): assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3) def test_fittedvalues(self): assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3) def test_jac(self): pass def test_predict(self): assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3) def test_predict_xb(self): assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3) def test_zstat(self): # Low precision because Z vs. 
t assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1) def no_info(self): pass def test_llf(self): assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1) def test_llr(self): assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2) def test_bse(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3) test_jac = no_info class CheckMNLogitBaseZero(CheckModelResults): def test_margeff_overall(self): me = self.res1.get_margeff() assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6) assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6) def test_margeff_mean(self): me = self.res1.get_margeff(at='mean') assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7) assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7) def test_margeff_dummy(self): data = self.data vote = data.data['vote'] exog = np.column_stack((data.exog, vote)) exog = sm.add_constant(exog, prepend=False) res = MNLogit(data.endog, exog).fit(method="newton", disp=0) me = res.get_margeff(dummy=True) assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall, 6) assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_dummy_overall_se, 6) me = res.get_margeff(dummy=True, method="eydx") assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall, 5) assert_almost_equal(me.margeff_se, self.res2.margeff_eydx_dummy_overall_se, 6) def test_j(self): assert_equal(self.res1.model.J, self.res2.J) def test_k(self): assert_equal(self.res1.model.K, self.res2.K) def test_endog_names(self): assert_equal(self.res1._get_endog_name(None,None)[1], ['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6']) def test_pred_table(self): # fitted results taken from gretl pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0, 1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1, 1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1, 1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0, 0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0, 0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0, 5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0, 6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0, 6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5, 0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0, 0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0, 1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6, 1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0, 1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0, 0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5, 6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6, 1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0, 0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1, 0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1, 0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6, 5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6, 6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5, 0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6, 6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5, 0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5, 0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1, 6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6, 0, 
6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0, 1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0, 1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0, 0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5, 5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0, 0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5, 6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0, 0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5, 6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6, 1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6, 6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6, 6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6, 5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1, 6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6, 5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6, 5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5] assert_array_equal(self.res1.predict().argmax(1), pred) # the rows should add up for pred table assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred)) # note this is just a regression test, gretl doesn't have a prediction # table pred = [[ 126., 41., 2., 0., 0., 12., 19.], [ 77., 73., 3., 0., 0., 15., 12.], [ 37., 43., 2., 0., 0., 19., 7.], [ 12., 9., 1., 0., 0., 9., 6.], [ 19., 10., 2., 0., 0., 20., 43.], [ 22., 25., 1., 0., 0., 31., 71.], [ 9., 7., 1., 0., 0., 18., 140.]] assert_array_equal(self.res1.pred_table(), pred) def test_resid(self): assert_array_equal(self.res1.resid_misclassified, self.res2.resid) class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero): @classmethod def setupClass(cls): data = sm.datasets.anes96.load() cls.data = data exog = data.exog exog = sm.add_constant(exog, prepend=False) cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0) res2 = Anes() res2.mnlogit_basezero() cls.res2 = res2 class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero): @classmethod def setupClass(cls): data = sm.datasets.anes96.load() cls.data = data exog = data.exog exog = sm.add_constant(exog, prepend=False) mymodel = MNLogit(data.endog, exog) cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000, #m=12, pgtol=1e-7, factr=1e3, # 5 failures #m=20, pgtol=1e-8, factr=1e2, # 3 failures #m=30, pgtol=1e-9, factr=1e1, # 1 failure m=40, pgtol=1e-10, factr=5e0, loglike_and_score=mymodel.loglike_and_score) res2 = Anes() res2.mnlogit_basezero() cls.res2 = res2 def test_perfect_prediction(): cur_dir = os.path.dirname(os.path.abspath(__file__)) iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results') iris_dir = os.path.abspath(iris_dir) iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",", skip_header=1) y = iris[:,-1] X = iris[:,:-1] X = X[y != 2] y = y[y != 2] X = sm.add_constant(X, prepend=True) mod = Logit(y,X) assert_raises(PerfectSeparationError, mod.fit, maxiter=1000) #turn off raise PerfectSeparationError mod.raise_on_perfect_prediction = False # this will raise if you set maxiter high enough with a singular matrix from pandas.util.testing import assert_produces_warning # this is not thread-safe with assert_produces_warning(): mod.fit(disp=False, maxiter=50) # should not raise but does warn def test_poisson_predict(): #GH: 175, make sure poisson predict works without offset and exposure data = sm.datasets.randhie.load() exog = sm.add_constant(data.exog, prepend=True) res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0) pred1 = 
res.predict() pred2 = res.predict(exog) assert_almost_equal(pred1, pred2) #exta options pred3 = res.predict(exog, offset=0, exposure=1) assert_almost_equal(pred1, pred3) pred3 = res.predict(exog, offset=0, exposure=2) assert_almost_equal(2*pred1, pred3) pred3 = res.predict(exog, offset=np.log(2), exposure=1) assert_almost_equal(2*pred1, pred3) def test_poisson_newton(): #GH: 24, Newton doesn't work well sometimes nobs = 10000 np.random.seed(987689) x = np.random.randn(nobs, 3) x = sm.add_constant(x, prepend=True) y_count = np.random.poisson(np.exp(x.sum(1))) mod = sm.Poisson(y_count, x) from pandas.util.testing import assert_produces_warning # this is not thread-safe with assert_produces_warning(): res = mod.fit(start_params=-np.ones(4), method='newton', disp=0) assert_(not res.mle_retvals['converged']) def test_issue_339(): # make sure MNLogit summary works for J != K. data = sm.datasets.anes96.load() exog = data.exog # leave out last exog column exog = exog[:,:-1] exog = sm.add_constant(exog, prepend=True) res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0) # strip the header from the test smry = "\n".join(res1.summary().as_text().split('\n')[9:]) cur_dir = os.path.dirname(os.path.abspath(__file__)) test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt') test_case = open(test_case_file, 'r').read() np.testing.assert_equal(smry, test_case[:-1]) def test_issue_341(): data = sm.datasets.anes96.load() exog = data.exog # leave out last exog column exog = exog[:,:-1] exog = sm.add_constant(exog, prepend=True) res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0) x = exog[0] np.testing.assert_equal(res1.predict(x).shape, (1,7)) np.testing.assert_equal(res1.predict(x[None]).shape, (1,7)) def test_iscount(): X = np.random.random((50, 10)) X[:,2] = np.random.randint(1, 10, size=50) X[:,6] = np.random.randint(1, 10, size=50) X[:,4] = np.random.randint(0, 2, size=50) X[:,1] = np.random.randint(-10, 10, size=50) # not integers count_ind = _iscount(X) assert_equal(count_ind, [2, 6]) def test_isdummy(): X = np.random.random((50, 10)) X[:,2] = np.random.randint(1, 10, size=50) X[:,6] = np.random.randint(0, 2, size=50) X[:,4] = np.random.randint(0, 2, size=50) X[:,1] = np.random.randint(-10, 10, size=50) # not integers count_ind = _isdummy(X) assert_equal(count_ind, [4, 6]) def test_non_binary(): y = [1, 2, 1, 2, 1, 2] X = np.random.randn(6, 2) np.testing.assert_raises(ValueError, Logit, y, X) def test_mnlogit_factor(): dta = sm.datasets.anes96.load_pandas() dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG'))) dta.exog['constant'] = 1 mod = sm.MNLogit(dta.endog, dta.exog) res = mod.fit(disp=0) # smoke tests params = res.params summary = res.summary() # with patsy del dta.exog['constant'] mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data) res2 = mod.fit(disp=0) res2.params summary = res2.summary() def test_formula_missing_exposure(): # see 2083 import statsmodels.formula.api as smf import pandas as pd d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan], 'constant': [1] * 4, 'exposure' : np.random.uniform(size=4), 'x': [1, 3, 2, 1.5]} df = pd.DataFrame(d) # should work mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure']) assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray') # make sure this raises exposure = pd.Series(np.random.randn(5)) assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']], exposure=exposure) if __name__ == "__main__": import nose nose.runmodule(argv=[__file__, 
'-vvs', '-x', '--pdb'], exit=False)
bsd-3-clause
infoxchange/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/gis/utils/geoip.py
316
14811
""" This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R) C API (http://www.maxmind.com/app/c). This is an alternative to the GPL licensed Python GeoIP interface provided by MaxMind. GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts. For IP-based geolocation, this module requires the GeoLite Country and City datasets, in binary format (CSV will not work!). The datasets may be downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/. Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples below for more details. TODO: Verify compatibility with Windows. Example: >>> from django.contrib.gis.utils import GeoIP >>> g = GeoIP() >>> g.country('google.com') {'country_code': 'US', 'country_name': 'United States'} >>> g.city('72.14.207.99') {'area_code': 650, 'city': 'Mountain View', 'country_code': 'US', 'country_code3': 'USA', 'country_name': 'United States', 'dma_code': 807, 'latitude': 37.419200897216797, 'longitude': -122.05740356445312, 'postal_code': '94043', 'region': 'CA'} >>> g.lat_lon('salon.com') (37.789798736572266, -122.39420318603516) >>> g.lon_lat('uh.edu') (-95.415199279785156, 29.77549934387207) >>> g.geos('24.124.1.80').wkt 'POINT (-95.2087020874023438 39.0392990112304688)' """ import os, re from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER from ctypes.util import find_library from django.conf import settings if not settings.configured: settings.configure() # Creating the settings dictionary with any settings, if needed. GEOIP_SETTINGS = dict((key, getattr(settings, key)) for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY') if hasattr(settings, key)) lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None) # GeoIP Exception class. class GeoIPException(Exception): pass # The shared library for the GeoIP C API. May be downloaded # from http://www.maxmind.com/download/geoip/api/c/ if lib_path: lib_name = None else: # TODO: Is this really the library name for Windows? lib_name = 'GeoIP' # Getting the path to the GeoIP library. if lib_name: lib_path = find_library(lib_name) if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). ' 'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name) lgeoip = CDLL(lib_path) # Regular expressions for recognizing IP addresses and the GeoIP # free database editions. ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$') free_regex = re.compile(r'^GEO-\d{3}FREE') lite_regex = re.compile(r'^GEO-\d{3}LITE') #### GeoIP C Structure definitions #### class GeoIPRecord(Structure): _fields_ = [('country_code', c_char_p), ('country_code3', c_char_p), ('country_name', c_char_p), ('region', c_char_p), ('city', c_char_p), ('postal_code', c_char_p), ('latitude', c_float), ('longitude', c_float), # TODO: In 1.4.6 this changed from `int dma_code;` to # `union {int metro_code; int dma_code;};`. Change # to a `ctypes.Union` in to accomodate in future when # pre-1.4.6 versions are no longer distributed. ('dma_code', c_int), ('area_code', c_int), # TODO: The following structure fields were added in 1.4.3 -- # uncomment these fields when sure previous versions are no # longer distributed by package maintainers. 
#('charset', c_int), #('continent_code', c_char_p), ] class GeoIPTag(Structure): pass #### ctypes function prototypes #### RECTYPE = POINTER(GeoIPRecord) DBTYPE = POINTER(GeoIPTag) # For retrieving records by name or address. def record_output(func): func.restype = RECTYPE return func rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr) rec_by_name = record_output(lgeoip.GeoIP_record_by_name) # For opening & closing GeoIP database files. geoip_open = lgeoip.GeoIP_open geoip_open.restype = DBTYPE geoip_close = lgeoip.GeoIP_delete geoip_close.argtypes = [DBTYPE] geoip_close.restype = None # String output routines. def string_output(func): func.restype = c_char_p return func geoip_dbinfo = string_output(lgeoip.GeoIP_database_info) cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr) cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name) cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr) cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name) #### GeoIP class #### class GeoIP(object): # The flags for GeoIP memory caching. # GEOIP_STANDARD - read database from filesystem, uses least memory. # # GEOIP_MEMORY_CACHE - load database into memory, faster performance # but uses more memory # # GEOIP_CHECK_CACHE - check for updated database. If database has been updated, # reload filehandle and/or memory cache. # # GEOIP_INDEX_CACHE - just cache # the most frequently accessed index portion of the database, resulting # in faster lookups than GEOIP_STANDARD, but less memory usage than # GEOIP_MEMORY_CACHE - useful for larger databases such as # GeoIP Organization and GeoIP City. Note, for GeoIP Country, Region # and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE # GEOIP_STANDARD = 0 GEOIP_MEMORY_CACHE = 1 GEOIP_CHECK_CACHE = 2 GEOIP_INDEX_CACHE = 4 cache_options = dict((opt, None) for opt in (0, 1, 2, 4)) _city_file = '' _country_file = '' # Initially, pointers to GeoIP file references are NULL. _city = None _country = None def __init__(self, path=None, cache=0, country=None, city=None): """ Initializes the GeoIP object, no parameters are required to use default settings. Keyword arguments may be passed in to customize the locations of the GeoIP data sets. * path: Base directory to where GeoIP data is located or the full path to where the city or country data files (*.dat) are located. Assumes that both the city and country data sets are located in this directory; overrides the GEOIP_PATH settings attribute. * cache: The cache settings when opening up the GeoIP datasets, and may be an integer in (0, 1, 2, 4) corresponding to the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE, and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings, respectively. Defaults to 0, meaning that the data is read from the disk. * country: The name of the GeoIP country data file. Defaults to 'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute. * city: The name of the GeoIP city data file. Defaults to 'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute. """ # Checking the given cache option. if cache in self.cache_options: self._cache = self.cache_options[cache] else: raise GeoIPException('Invalid caching option: %s' % cache) # Getting the GeoIP data path. 
if not path: path = GEOIP_SETTINGS.get('GEOIP_PATH', None) if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.') if not isinstance(path, basestring): raise TypeError('Invalid path type: %s' % type(path).__name__) if os.path.isdir(path): # Constructing the GeoIP database filenames using the settings # dictionary. If the database files for the GeoLite country # and/or city datasets exist, then try and open them. country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat')) if os.path.isfile(country_db): self._country = geoip_open(country_db, cache) self._country_file = country_db city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat')) if os.path.isfile(city_db): self._city = geoip_open(city_db, cache) self._city_file = city_db elif os.path.isfile(path): # Otherwise, some detective work will be needed to figure # out whether the given database path is for the GeoIP country # or city databases. ptr = geoip_open(path, cache) info = geoip_dbinfo(ptr) if lite_regex.match(info): # GeoLite City database detected. self._city = ptr self._city_file = path elif free_regex.match(info): # GeoIP Country database detected. self._country = ptr self._country_file = path else: raise GeoIPException('Unable to recognize database edition: %s' % info) else: raise GeoIPException('GeoIP path must be a valid file or directory.') def __del__(self): # Cleaning any GeoIP file handles lying around. if self._country: geoip_close(self._country) if self._city: geoip_close(self._city) def _check_query(self, query, country=False, city=False, city_or_country=False): "Helper routine for checking the query and database availability." # Making sure a string was passed in for the query. if not isinstance(query, basestring): raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__) # Extra checks for the existence of country and city databases. if city_or_country and not (self._country or self._city): raise GeoIPException('Invalid GeoIP country and city data files.') elif country and not self._country: raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file) elif city and not self._city: raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file) def city(self, query): """ Returns a dictionary of city information for the given IP address or Fully Qualified Domain Name (FQDN). Some information in the dictionary may be undefined (None). """ self._check_query(query, city=True) if ipregex.match(query): # If an IP address was passed in ptr = rec_by_addr(self._city, c_char_p(query)) else: # If a FQDN was passed in. ptr = rec_by_name(self._city, c_char_p(query)) # Checking the pointer to the C structure, if valid pull out elements # into a dicionary and return. if bool(ptr): record = ptr.contents return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_) else: return None def country_code(self, query): "Returns the country code for the given IP Address or FQDN." self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_code_by_addr(self._country, query) else: return cntry_code_by_name(self._country, query) else: return self.city(query)['country_code'] def country_name(self, query): "Returns the country name for the given IP Address or FQDN." 
self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_name_by_addr(self._country, query) else: return cntry_name_by_name(self._country, query) else: return self.city(query)['country_name'] def country(self, query): """ Returns a dictionary with the country code and name when given an IP address or a Fully Qualified Domain Name (FQDN). For example, both '24.124.1.80' and 'djangoproject.com' are valid parameters. """ # Returning the country code and name return {'country_code' : self.country_code(query), 'country_name' : self.country_name(query), } #### Coordinate retrieval routines #### def coords(self, query, ordering=('longitude', 'latitude')): cdict = self.city(query) if cdict is None: return None else: return tuple(cdict[o] for o in ordering) def lon_lat(self, query): "Returns a tuple of the (longitude, latitude) for the given query." return self.coords(query) def lat_lon(self, query): "Returns a tuple of the (latitude, longitude) for the given query." return self.coords(query, ('latitude', 'longitude')) def geos(self, query): "Returns a GEOS Point object for the given query." ll = self.lon_lat(query) if ll: from django.contrib.gis.geos import Point return Point(ll, srid=4326) else: return None #### GeoIP Database Information Routines #### def country_info(self): "Returns information about the GeoIP country database." if self._country is None: ci = 'No GeoIP Country data in "%s"' % self._country_file else: ci = geoip_dbinfo(self._country) return ci country_info = property(country_info) def city_info(self): "Returns information about the GeoIP city database." if self._city is None: ci = 'No GeoIP City data in "%s"' % self._city_file else: ci = geoip_dbinfo(self._city) return ci city_info = property(city_info) def info(self): "Returns information about all GeoIP databases in use." return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info) info = property(info) #### Methods for compatibility w/the GeoIP-Python API. #### @classmethod def open(cls, full_path, cache): return GeoIP(full_path, cache) def _rec_by_arg(self, arg): if self._city: return self.city(arg) else: return self.country(arg) region_by_addr = city region_by_name = city record_by_addr = _rec_by_arg record_by_name = _rec_by_arg country_code_by_addr = country_code country_code_by_name = country_code country_name_by_addr = country_name country_name_by_name = country_name
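# A minimal usage sketch, added for illustration only (not part of the original
# module). It assumes the GeoLite datasets live in a hypothetical
# /usr/local/share/geoip directory; any directory configured via GEOIP_PATH or
# passed as `path` works the same way:
#
#     from django.contrib.gis.utils import GeoIP
#     g = GeoIP(path='/usr/local/share/geoip', cache=GeoIP.GEOIP_MEMORY_CACHE)
#     g.country('djangoproject.com')   # {'country_code': ..., 'country_name': ...}
#     g.lat_lon('72.14.207.99')        # (latitude, longitude), requires the city dataset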
gpl-3.0
mrcslws/nupic.research
projects/greedy_infomax/experiments/block_sparse_optimization.py
3
5783
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2021, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- from copy import deepcopy import ray.tune as tune import torch from nupic.research.frameworks.greedy_infomax.experiments.data_parallel_block_model_experiment import DataParallelBlockModelExperiment # noqa E501 from nupic.research.frameworks.greedy_infomax.models.block_model import BlockModel from nupic.research.frameworks.greedy_infomax.models.classification_model import ( MultiClassifier, ) from nupic.research.frameworks.greedy_infomax.utils.loss_utils import ( all_module_losses, multiple_cross_entropy_supervised, ) from nupic.research.frameworks.greedy_infomax.utils.model_utils import ( small_resnet, small_sparse_70_resnet, small_sparse_80_resnet, ) from .block_wise_training import CONFIGS as BLOCK_WISE_CONFIGS FULL_RESNET_50 = BLOCK_WISE_CONFIGS["full_resnet_50"] # 5 epochs SigOpt optimization NUM_EPOCHS = 10 NUM_GPUS = 8 small_dense_resnet_args = {"module_args": small_resnet} small_sparse_70_resnet_args = {"module_args": small_sparse_70_resnet} small_sparse_80_resnet_args = {"module_args": small_sparse_80_resnet} SMALL_DENSE_ONE_CYCLE_GRID_SEARCH = deepcopy(FULL_RESNET_50) SMALL_DENSE_ONE_CYCLE_GRID_SEARCH.update(dict( experiment_class=DataParallelBlockModelExperiment, wandb_args=dict( project="greedy_infomax_small_resnet_onecycle", name=f"small_dense" ), epochs=NUM_EPOCHS, epochs_to_validate=[NUM_EPOCHS - 1, ], # loss distributed=False, supervised_training_epochs_per_validation=50, # Uncomment this section for small batches / debugging purposes # batches_in_epoch=2, # batches_in_epoch_val=2, # batches_in_epoch_supervised=2, # batch_size = 2, # batch_size_supervised=2, # val_batch_size=2, # Drop last to avoid weird batches unsupervised_loader_drop_last=True, supervised_loader_drop_last=True, validation_loader_drop_last=True, batch_size=16 * NUM_GPUS, # Multiply by num_gpus batch_size_supervised=16 * NUM_GPUS, val_batch_size=16 * NUM_GPUS, model_class=BlockModel, model_args=small_dense_resnet_args, optimizer_class=torch.optim.Adam, optimizer_args=dict(lr=2e-4), loss_function=all_module_losses, find_unused_parameters=True, device_ids=list(range(NUM_GPUS)), pin_memory=False, lr_scheduler_class=torch.optim.lr_scheduler.OneCycleLR, lr_scheduler_args=dict( # 0.001, 0.003, 0.01 # 0.006, 0.008 max_lr=tune.grid_search([0.0045, 0.0055, 0.0065]), # max_lr=tune.grid_search([0.19, 0.2, 0.21, 0.22, 0.213]), div_factor=100, # initial_lr = 0.01 final_div_factor=1000, # min_lr = 0.0000025 pct_start=1.0 / 10.0, epochs=NUM_EPOCHS, anneal_strategy="linear", max_momentum=1e-4, cycle_momentum=False, ), cuda_launch_blocking=False, 
classifier_config=dict( model_class=MultiClassifier, model_args=dict(num_classes=10), loss_function=multiple_cross_entropy_supervised, # Classifier Optimizer class. Must inherit from "torch.optim.Optimizer" optimizer_class=torch.optim.Adam, # Optimizer class class arguments passed to the constructor optimizer_args=dict(lr=2e-4), distributed=False, ), )) SMALL_SPARSE_70_ONE_CYCE_GRID_SEARCH = deepcopy(SMALL_DENSE_ONE_CYCLE_GRID_SEARCH) SMALL_SPARSE_70_ONE_CYCE_GRID_SEARCH.update( wandb_args=dict( project="greedy_infomax_small_resnet_onecycle", name=f"small_sparse_70" ), model_args=small_sparse_70_resnet_args, lr_scheduler_args=dict( max_lr=tune.grid_search([0.0045, 0.0055, 0.006]), div_factor=100, # initial_lr = 0.01 final_div_factor=1000, # min_lr = 0.0000025 pct_start=1.0 / 10.0, epochs=NUM_EPOCHS, anneal_strategy="linear", max_momentum=1e-4, cycle_momentum=False, ), ) SMALL_SPARSE_80_ONE_CYCE_GRID_SEARCH = deepcopy(SMALL_DENSE_ONE_CYCLE_GRID_SEARCH) SMALL_SPARSE_80_ONE_CYCE_GRID_SEARCH.update( wandb_args=dict( project="greedy_infomax_small_resnet_onecycle", name=f"small_sparse_80" ), model_args=small_sparse_80_resnet_args, lr_scheduler_args=dict( max_lr=tune.grid_search([0.004, 0.008, 0.012]), # max_lr=tune.grid_search([0.19, 0.2, 0.21, 0.22, 0.213]), div_factor=100, # initial_lr = 0.01 final_div_factor=1000, # min_lr = 0.0000025 pct_start=1.0 / 10.0, epochs=NUM_EPOCHS, anneal_strategy="linear", max_momentum=1e-4, cycle_momentum=False, ), ) CONFIGS = dict( small_dense_one_cycle=SMALL_DENSE_ONE_CYCLE_GRID_SEARCH, small_sparse_70_one_cycle=SMALL_SPARSE_70_ONE_CYCE_GRID_SEARCH, small_sparse_80_one_cycle=SMALL_SPARSE_80_ONE_CYCE_GRID_SEARCH, )
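# Illustrative sketch, not part of the original experiment configs: how the
# OneCycleLR arguments used above behave on their own. The tiny model, the
# max_lr value (one point from the grid search above) and steps_per_epoch are
# placeholders; the real experiment derives its step count from the data loader.
if __name__ == "__main__":
    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=0.0055,            # one of the grid-searched values
        div_factor=100,           # initial_lr = max_lr / 100
        final_div_factor=1000,    # min_lr = initial_lr / 1000
        pct_start=1.0 / 10.0,     # ramp up during the first 10% of training
        epochs=NUM_EPOCHS,
        steps_per_epoch=100,      # placeholder step count
        anneal_strategy="linear",
        cycle_momentum=False,
    )
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    print("lr after 5 steps:", optimizer.param_groups[0]["lr"])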
agpl-3.0
mrcslws/nupic.research
projects/whydense/cifar/mobilenet_tune.py
4
6722
# Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2019, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # # Original Code here: # https://github.com/pytorch/examples/blob/master/mnist/main.py import os import click import ray import ray.tune as tune import torch from torchvision import datasets from nupic.research.support.parse_config import parse_config from projects.whydense.cifar.mobilenet_cifar import MobileNetCIFAR10 class MobileNetTune(tune.Trainable): """ray.tune trainable class for running MobileNet CIFAR experiments:""" def _setup(self, config): import ray # noqa F401 import ray.tune as tune # noqa F401 import torch.nn as nn # noqa F401 from nupic.research.frameworks.pytorch.models import ( # noqa F401 MobileNetV1, mobile_net_v1_sparse_point, mobile_net_v1_sparse_depth, ) # Cannot pass functions to "ray.tune". Make sure to use string in the config # and evaluate during "_setup" config["loss_function"] = eval(config["loss_function"], globals(), locals()) config["model_type"] = eval(config["model_type"], globals(), locals()) self.experiment = MobileNetCIFAR10(config) def _train(self): self.experiment.train(self._iteration) return self.experiment.test() def _save(self, checkpoint_dir): return self.experiment.save(checkpoint_dir) def _restore(self, checkpoint_dir): self.experiment.restore(checkpoint_dir) @ray.remote def run_experiment(config, trainable, num_cpus=1, num_gpus=0): """Run a single tune experiment in parallel as a "remote" function. :param config: The experiment configuration :type config: dict :param trainable: tune.Trainable class with your experiment :type trainable: :class:`ray.tune.Trainable` """ resources_per_trial = {"cpu": num_cpus, "gpu": num_gpus} print("experiment =", config["name"]) print("resources_per_trial =", resources_per_trial) # Stop criteria. 
Default to total number of iterations/epochs stop_criteria = {"training_iteration": config.get("iterations")} stop_criteria.update(config.get("stop", {})) print("stop_criteria =", stop_criteria) tune.run( trainable, name=config["name"], stop=stop_criteria, config=config, resources_per_trial=resources_per_trial, num_samples=config.get("repetitions", 1), local_dir=config.get("path", None), upload_dir=config.get("upload_dir", None), sync_function=config.get("sync_function", None), checkpoint_freq=config.get("checkpoint_freq", 0), checkpoint_at_end=config.get("checkpoint_at_end", False), export_formats=config.get("", None), search_alg=config.get("search_alg", None), scheduler=config.get("scheduler", None), verbose=config.get("verbose", 2), resume=config.get("resume", False), queue_trials=config.get("queue_trials", False), reuse_actors=config.get("reuse_actors", False), trial_executor=config.get("trial_executor", None), raise_on_failed_trial=config.get("raise_on_failed_trial", True), ) @click.command() @click.option( "-c", "--config", type=open, default="mobilenet_experiments.cfg", show_default=True, help="your experiments config file", ) @click.option( "-e", "--experiments", multiple=True, help="run only selected experiments, by default run all " "experiments in config file.", ) @click.option( "-n", "--num_cpus", type=int, default=os.cpu_count(), show_default=True, help="number of cpus you want to use", ) @click.option( "-g", "--num_gpus", type=int, default=torch.cuda.device_count(), show_default=True, help="number of gpus you want to use", ) @click.option("--redis-address", help="Ray Cluster redis address") def main(config, experiments, num_cpus, num_gpus, redis_address): print("config =", config.name) print("experiments =", experiments) print("num_gpus =", num_gpus) print("num_cpus =", num_cpus) print("redis_address =", redis_address) # Use configuration file location as the project location. project_dir = os.path.dirname(config.name) project_dir = os.path.abspath(project_dir) print("project_dir =", project_dir) # Load and parse experiment configurations configs = parse_config(config, experiments, globals_param=globals()) # Pre-download dataset data_dir = os.path.join(project_dir, "data") datasets.CIFAR10(data_dir, download=True, train=True) # Initialize ray cluster if redis_address is not None: ray.init(redis_address=redis_address, include_webui=True) num_cpus = 1 else: ray.init(num_cpus=num_cpus, num_gpus=num_gpus, local_mode=num_cpus == 1) # Run all experiments in parallel results = [] for exp in configs: config = configs[exp] config["name"] = exp # Make sure local directories are relative to the project location path = config.get("path", None) if path and not os.path.isabs(path): config["path"] = os.path.join(project_dir, path) data_dir = config.get("data_dir", "data") if not os.path.isabs(data_dir): config["data_dir"] = os.path.join(project_dir, data_dir) # When running multiple hyperparameter searches on different experiments, # ray.tune will run one experiment at the time. We use "ray.remote" to # run each tune experiment in parallel as a "remote" function and wait until # all experiments complete results.append( run_experiment.remote( config, MobileNetTune, num_cpus=1, num_gpus=num_gpus / num_cpus ) ) # Wait for all experiments to complete ray.get(results) ray.shutdown() if __name__ == "__main__": main()
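# Illustrative note, not part of the original script: the experiment config
# parsed from the .cfg file arrives here as a plain dict. A hypothetical
# minimal entry would therefore provide keys like the ones consumed above
# (names and values below are placeholders, not a real experiment):
#
#     {"name": "mobilenet_example",       # experiment name passed to tune.run
#      "iterations": 100,                 # default training_iteration stop criterion
#      "repetitions": 1,                  # tune num_samples
#      "path": "results",                 # tune local_dir, made absolute in main()
#      "checkpoint_freq": 10,
#      "checkpoint_at_end": True,
#      "loss_function": "nn.CrossEntropyLoss()",  # evaluated inside MobileNetTune._setup
#      "model_type": "MobileNetV1"}               # evaluated inside MobileNetTune._setup
#
# plus whatever model and optimizer settings MobileNetCIFAR10 itself expects.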
agpl-3.0
mrcslws/nupic.research
projects/imagenet/experiments/snr_pruning.py
3
6687
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2020, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import copy import numpy as np import torch import torch.nn.init import torch.optim from ray import tune from nupic.research.frameworks.backprop_structure.model_conversion import ( maskedvdrop_to_sparseweights, ) from nupic.research.frameworks.backprop_structure.modules import ( MaskedVDropCentralData, prunable_vdrop_conv2d, prunable_vdrop_linear, ) from nupic.research.frameworks.backprop_structure.networks import vdrop_resnet50 from nupic.research.frameworks.pytorch.models.pretrained_models import resnet50_swsl from nupic.research.frameworks.pytorch.models.resnets import resnet50 from nupic.research.frameworks.pytorch.modules import sparse_conv2d, sparse_linear from nupic.research.frameworks.vernon.distributed import ImagenetExperiment, mixins from .base import DEFAULT class SNPruneExperiment(mixins.LogEveryLoss, mixins.LogEveryLearningRate, mixins.KnowledgeDistillation, mixins.MultiCycleLR, mixins.PruneLowSNRLayers, mixins.RegularizeLoss, mixins.ConstrainParameters, ImagenetExperiment): pass def conv_target_density(in_channels, out_channels, kernel_size): """25% dense everywhere except the stem""" if kernel_size == 7: return 1.0 else: return 0.25 def make_reg_schedule(epochs, pct_ramp_start, pct_ramp_end, peak_value, pct_drop, final_value): def reg_schedule(epoch, batch_idx, steps_per_epoch): pct = (epoch + batch_idx / steps_per_epoch) / epochs if pct < pct_ramp_start: return 0.0 elif pct < pct_ramp_end: progress = (pct - pct_ramp_start) / (pct_ramp_end - pct_ramp_start) return progress * peak_value elif pct < pct_drop: return peak_value else: return final_value return reg_schedule NUM_CLASSES = 1000 SUBSEQUENT_LR_SCHED_ARGS = dict( max_lr=6.0, pct_start=0.0625, anneal_strategy="linear", base_momentum=0.6, max_momentum=0.75, cycle_momentum=True, final_div_factor=1000.0 ) SNR_PRUNE_BASE = copy.deepcopy(DEFAULT) SNR_PRUNE_BASE.update(dict( experiment_class=SNPruneExperiment, log_timestep_freq=10, num_classes=NUM_CLASSES, batch_size=64, val_batch_size=128, extra_validations_per_epoch=1, validate_on_prune=True, seed=tune.sample_from(lambda spec: np.random.randint(2, 10000)), model_class=vdrop_resnet50, model_args=dict( num_classes=NUM_CLASSES, z_logvar_init=-15, vdrop_data_class=MaskedVDropCentralData, conv_layer=prunable_vdrop_conv2d, conv_args=dict( target_density=conv_target_density, ), linear_layer=prunable_vdrop_linear, linear_args=dict( target_density=0.25, ), ), optimizer_class=torch.optim.SGD, optimizer_args=dict( lr=6.0 / 6.0, momentum=0.75, ), lr_scheduler_class=None, lr_scheduler_args=None, 
teacher_model_class=[resnet50_swsl], downscale_reg_with_training_set=True, init_batch_norm=True, use_auto_augment=True, checkpoint_at_end=True, sync_to_driver=False, checkpoint_freq=10, keep_checkpoints_num=None, )) # 0.7465 on 32 GPUs INITIAL_EPOCHS = 15 NUM_PRUNINGS = 6 EPOCHS_BETWEEN_PRUNINGS = 3 FINAL_EPOCHS = 30 NUM_EPOCHS = (INITIAL_EPOCHS + ((NUM_PRUNINGS - 1) * EPOCHS_BETWEEN_PRUNINGS) + FINAL_EPOCHS) SNR_PRUNE_15_15_30 = copy.deepcopy(SNR_PRUNE_BASE) SNR_PRUNE_15_15_30.update(dict( name="SNR_PRUNE_15_15_30", epochs_to_validate=range(NUM_EPOCHS), epochs=NUM_EPOCHS, wandb_args=dict( project="snr-pruning", name="SNR 15-15(6)-30, peak_reg=0.01", ), multi_cycle_lr_args=( (0, dict(max_lr=6.0, pct_start=0.2, anneal_strategy="linear", base_momentum=0.6, max_momentum=0.75, cycle_momentum=True, div_factor=6.0, final_div_factor=1000.0)), (15, SUBSEQUENT_LR_SCHED_ARGS), (18, SUBSEQUENT_LR_SCHED_ARGS), (21, SUBSEQUENT_LR_SCHED_ARGS), (24, SUBSEQUENT_LR_SCHED_ARGS), (27, SUBSEQUENT_LR_SCHED_ARGS), (30, SUBSEQUENT_LR_SCHED_ARGS), ), reg_scalar=make_reg_schedule( epochs=NUM_EPOCHS, pct_ramp_start=2 / 60, pct_ramp_end=14 / 60, peak_value=0.01, pct_drop=30 / 60, final_value=0.0005, ), prune_schedule=[ (15, 1 / 6), (18, 2 / 6), (21, 3 / 6), (24, 4 / 6), (27, 5 / 6), (30, 6 / 6), ], )) class ExportedExperiment(mixins.ExportModel, mixins.RezeroWeights, mixins.KnowledgeDistillation, ImagenetExperiment): pass EXPORT_TO_STATIC = copy.deepcopy(DEFAULT) EXPORT_TO_STATIC.update( batch_size=128, experiment_class=ExportedExperiment, model_class=resnet50, model_args=dict( num_classes=NUM_CLASSES, conv_layer=sparse_conv2d, conv_args=dict( density=conv_target_density, ), linear_layer=sparse_linear, linear_args=dict( density=0.25, ), ), ) CONFIGS = dict( snr_prune_15_15_30=SNR_PRUNE_15_15_30, snr_prune_15_15_30_exported={**EXPORT_TO_STATIC, "prev_config": SNR_PRUNE_15_15_30, "export_model_fn": maskedvdrop_to_sparseweights}, )
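# Illustrative sketch, not part of the original config: evaluating the
# piecewise regularization schedule built by make_reg_schedule above at a few
# points of the 60-epoch SNR_PRUNE_15_15_30 run (same arguments as reg_scalar).
if __name__ == "__main__":
    reg = make_reg_schedule(
        epochs=NUM_EPOCHS,
        pct_ramp_start=2 / 60,
        pct_ramp_end=14 / 60,
        peak_value=0.01,
        pct_drop=30 / 60,
        final_value=0.0005,
    )
    for epoch in (0, 8, 20, 40):
        print(epoch, reg(epoch, batch_idx=0, steps_per_epoch=1000))
    # Expected shape: 0.0 during the first epochs, a linear ramp toward the
    # 0.01 peak, flat at the peak until the drop at the halfway point, then
    # the 0.0005 final value.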
agpl-3.0
scenarios/tensorflow
tensorflow/examples/learn/iris_custom_model.py
11
2621
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of Estimator for Iris plant dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from sklearn import cross_validation from sklearn import datasets from sklearn import metrics import tensorflow as tf layers = tf.contrib.layers learn = tf.contrib.learn def my_model(features, target): """DNN with three hidden layers, and dropout of 0.1 probability.""" # Convert the target to a one-hot tensor of shape (length of features, 3) and # with an on-value of 1 for each one-hot vector of length 3. target = tf.one_hot(target, 3, 1, 0) # Create three fully connected layers respectively of size 10, 20, and 10 with # each layer having a dropout probability of 0.1. normalizer_fn = layers.dropout normalizer_params = {'keep_prob': 0.9} features = layers.stack( features, layers.fully_connected, [10, 20, 10], normalizer_fn=normalizer_fn, normalizer_params=normalizer_params) # Compute logits (1 per class) and compute loss. logits = layers.fully_connected(features, 3, activation_fn=None) loss = tf.contrib.losses.softmax_cross_entropy(logits, target) # Create a tensor for training op. train_op = tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return ({ 'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits) }, loss, train_op) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = cross_validation.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = learn.Estimator(model_fn=my_model) classifier.fit(x_train, y_train, steps=1000) y_predicted = [ p['class'] for p in classifier.predict( x_test, as_iterable=True) ] score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy: {0:f}'.format(score)) if __name__ == '__main__': tf.app.run()
apache-2.0
marcocaccin/scikit-learn
sklearn/covariance/robust_covariance.py
103
29653
""" Robust location and covariance estimators. Here are implemented estimators that are resistant to outliers. """ # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import warnings import numbers import numpy as np from scipy import linalg from scipy.stats import chi2 from . import empirical_covariance, EmpiricalCovariance from ..utils.extmath import fast_logdet, pinvh from ..utils import check_random_state, check_array # Minimum Covariance Determinant # Implementing of an algorithm by Rousseeuw & Van Driessen described in # (A Fast Algorithm for the Minimum Covariance Determinant Estimator, # 1999, American Statistical Association and the American Society # for Quality, TECHNOMETRICS) # XXX Is this really a public function? It's not listed in the docs or # exported by sklearn.covariance. Deprecate? def c_step(X, n_support, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. Parameters ---------- X : array-like, shape (n_samples, n_features) Data set in which we look for the n_support observations whose scatter matrix has minimum determinant. n_support : int, > n_samples / 2 Number of observations to compute the robust estimates of location and covariance from. remaining_iterations : int, optional Number of iterations to perform. According to [Rouseeuw1999]_, two iterations are sufficient to get close to the minimum, and we never need more than 30 to reach convergence. initial_estimates : 2-tuple, optional Initial estimates of location and shape from which to run the c_step procedure: - initial_estimates[0]: an initial location estimate - initial_estimates[1]: an initial covariance estimate verbose : boolean, optional Verbose mode. random_state : integer or numpy.RandomState, optional The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) Returns ------- location : array-like, shape (n_features,) Robust location estimates. covariance : array-like, shape (n_features, n_features) Robust covariance estimates. support : array-like, shape (n_samples,) A mask for the `n_support` observations whose scatter matrix has minimum determinant. References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ X = np.asarray(X) random_state = check_random_state(random_state) return _c_step(X, n_support, remaining_iterations=remaining_iterations, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state) def _c_step(X, n_support, random_state, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance): n_samples, n_features = X.shape # Initialisation support = np.zeros(n_samples, dtype=bool) if initial_estimates is None: # compute initial robust estimates from a random subset support[random_state.permutation(n_samples)[:n_support]] = True else: # get initial robust estimates from the function parameters location = initial_estimates[0] covariance = initial_estimates[1] # run a special iteration for that case (to get an initial support) precision = pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(1) # compute new estimates support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(0) covariance = cov_computation_method(X_support) # Iterative procedure for Minimum Covariance Determinant computation det = fast_logdet(covariance) previous_det = np.inf while (det < previous_det) and (remaining_iterations > 0): # save old estimates values previous_location = location previous_covariance = covariance previous_det = det previous_support = support # compute a new support from the full data set mahalanobis distances precision = pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) # compute new estimates support = np.zeros(n_samples, dtype=bool) support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(axis=0) covariance = cov_computation_method(X_support) det = fast_logdet(covariance) # update remaining iterations for early stopping remaining_iterations -= 1 previous_dist = dist dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) # Catch computation errors if np.isinf(det): raise ValueError( "Singular covariance matrix. " "Please check that the covariance matrix corresponding " "to the dataset is full rank and that MinCovDet is used with " "Gaussian-distributed data (or at least data drawn from a " "unimodal, symmetric distribution.") # Check convergence if np.allclose(det, previous_det): # c_step procedure converged if verbose: print("Optimal couple (location, covariance) found before" " ending iterations (%d left)" % (remaining_iterations)) results = location, covariance, det, support, dist elif det > previous_det: # determinant has increased (should not happen) warnings.warn("Warning! det > previous_det (%.15f > %.15f)" % (det, previous_det), RuntimeWarning) results = previous_location, previous_covariance, \ previous_det, previous_support, previous_dist # Check early stopping if remaining_iterations == 0: if verbose: print('Maximum number of iterations reached') results = location, covariance, det, support, dist return results def select_candidates(X, n_support, n_trials, select=1, n_iter=30, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """Finds the best pure subset of observations to compute MCD from it. 
The purpose of this function is to find the best sets of n_support observations with respect to a minimization of their covariance matrix determinant. Equivalently, it removes n_samples-n_support observations to construct what we call a pure data set (i.e. not containing outliers). The list of the observations of the pure data set is referred to as the `support`. Starting from a random support, the pure data set is found by the c_step procedure introduced by Rousseeuw and Van Driessen in [Rouseeuw1999]_. Parameters ---------- X : array-like, shape (n_samples, n_features) Data (sub)set in which we look for the n_support purest observations. n_support : int, [(n + p + 1)/2] < n_support < n The number of samples the pure data set must contain. select : int, int > 0 Number of best candidates results to return. n_trials : int, nb_trials > 0 or 2-tuple Number of different initial sets of observations from which to run the algorithm. Instead of giving a number of trials to perform, one can provide a list of initial estimates that will be used to iteratively run c_step procedures. In this case: - n_trials[0]: array-like, shape (n_trials, n_features) is the list of `n_trials` initial location estimates - n_trials[1]: array-like, shape (n_trials, n_features, n_features) is the list of `n_trials` initial covariances estimates n_iter : int, nb_iter > 0 Maximum number of iterations for the c_step procedure. (2 is enough to be close to the final solution. "Never" exceeds 20). random_state : integer or numpy.RandomState, default None The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) verbose : boolean, default False Control the output verbosity. See Also --------- c_step Returns ------- best_locations : array-like, shape (select, n_features) The `select` location estimates computed from the `select` best supports found in the data set (`X`). best_covariances : array-like, shape (select, n_features, n_features) The `select` covariance estimates computed from the `select` best supports found in the data set (`X`). best_supports : array-like, shape (select, n_samples) The `select` best supports found in the data set (`X`). References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ random_state = check_random_state(random_state) n_samples, n_features = X.shape if isinstance(n_trials, numbers.Integral): run_from_estimates = False elif isinstance(n_trials, tuple): run_from_estimates = True estimates_list = n_trials n_trials = estimates_list[0].shape[0] else: raise TypeError("Invalid 'n_trials' parameter, expected tuple or " " integer, got %s (%s)" % (n_trials, type(n_trials))) # compute `n_trials` location and shape estimates candidates in the subset all_estimates = [] if not run_from_estimates: # perform `n_trials` computations from random initial supports for j in range(n_trials): all_estimates.append( _c_step( X, n_support, remaining_iterations=n_iter, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) else: # perform computations from every given initial estimates for j in range(n_trials): initial_estimates = (estimates_list[0][j], estimates_list[1][j]) all_estimates.append(_c_step( X, n_support, remaining_iterations=n_iter, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \ zip(*all_estimates) # find the `n_best` best results among the `n_trials` ones index_best = np.argsort(all_dets_sub)[:select] best_locations = np.asarray(all_locs_sub)[index_best] best_covariances = np.asarray(all_covs_sub)[index_best] best_supports = np.asarray(all_supports_sub)[index_best] best_ds = np.asarray(all_ds_sub)[index_best] return best_locations, best_covariances, best_supports, best_ds def fast_mcd(X, support_fraction=None, cov_computation_method=empirical_covariance, random_state=None): """Estimates the Minimum Covariance Determinant matrix. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- X : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: `[n_sample + n_features + 1] / 2`. random_state : integer or numpy.RandomState, optional The generator used to randomly subsample. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) Notes ----- The FastMCD algorithm has been introduced by Rousseuw and Van Driessen in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS". The principle is to compute robust estimates and random subsets before pooling them into a larger subsets, and finally into the full data set. Depending on the size of the initial sample, we have one, two or three such computation levels. Note that only raw estimates are returned. If one is interested in the correction and reweighting steps described in [Rouseeuw1999]_, see the MinCovDet object. References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 Returns ------- location : array-like, shape (n_features,) Robust location of the data. covariance : array-like, shape (n_features, n_features) Robust covariance of the features. support : array-like, type boolean, shape (n_samples,) A mask of the observations that have been used to compute the robust location and covariance estimates of the data set. """ random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator='fast_mcd') n_samples, n_features = X.shape # minimum breakdown value if support_fraction is None: n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) else: n_support = int(support_fraction * n_samples) # 1-dimensional case quick computation # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust # Regression and Outlier Detection, John Wiley & Sons, chapter 4) if n_features == 1: if n_support < n_samples: # find the sample shortest halves X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)] halves_start = np.where(diff == np.min(diff))[0] # take the middle points' mean to get the robust location estimate location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) # Starting FastMCD algorithm for p-dimensional case if (n_samples > 500) and (n_features > 1): # 1. Find candidate supports on subsets # a. split the set in subsets of size ~ 300 n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) # b. perform a total of 500 trials n_trials_tot = 500 # c. select 10 best (location, covariance) for each subset n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: # The above is too big. 
Let's try with something much small # (and less optimal) all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_tot = 10 n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] best_locations_sub, best_covariances_sub, _, _ = select_candidates( current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub # 2. Pool the candidate supports into a merged set # (possibly the full dataset) n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 # find the best couples (location, covariance) on the merged set selection = random_state.permutation(n_samples)[:n_samples_merged] locations_merged, covariances_merged, supports_merged, d = \ select_candidates( X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=cov_computation_method, random_state=random_state) # 3. Finally get the overall best (locations, covariance) couple if n_samples < 1500: # directly get the best couple (location, covariance) location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: # select the best couple on the full dataset locations_full, covariances_full, supports_full, d = \ select_candidates( X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: # 1. Find the 10 best couples (location, covariance) # considering two iterations n_trials = 30 n_best = 10 locations_best, covariances_best, _, _ = select_candidates( X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) # 2. Select the best couple on the full dataset amongst the 10 locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] return location, covariance, support, dist class MinCovDet(EmpiricalCovariance): """Minimum Covariance Determinant (MCD): robust estimator of covariance. The Minimum Covariance Determinant covariance estimator is to be applied on Gaussian-distributed data, but could still be relevant on data drawn from a unimodal, symmetric distribution. It is not meant to be used with multi-modal data (the algorithm used to fit a MinCovDet object is likely to fail in such a case). One should consider projection pursuit methods to deal with multi-modal datasets. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- store_precision : bool Specify if the estimated precision is stored. 
assume_centered : Boolean If True, the support of the robust location and the covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2 random_state : integer or numpy.RandomState, optional The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- raw_location_ : array-like, shape (n_features,) The raw robust estimated location before correction and re-weighting. raw_covariance_ : array-like, shape (n_features, n_features) The raw robust estimated covariance before correction and re-weighting. raw_support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the raw robust estimates of location and shape, before correction and re-weighting. location_ : array-like, shape (n_features,) Estimated robust location covariance_ : array-like, shape (n_features, n_features) Estimated robust covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. dist_ : array-like, shape (n_samples,) Mahalanobis distances of the training set (on which `fit` is called) observations. References ---------- .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984.` .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS` .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400` """ _nonrobust_covariance = staticmethod(empirical_covariance) def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, random_state=None): self.store_precision = store_precision self.assume_centered = assume_centered self.support_fraction = support_fraction self.random_state = random_state def fit(self, X, y=None): """Fits a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self : object Returns self. 
""" X = check_array(X, ensure_min_samples=2, estimator='MinCovDet') random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn("The covariance matrix associated to your dataset " "is not full rank") # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) # get precision matrix in an optimized way precision = pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self def correct_covariance(self, data): """Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [Rouseeuw1984]_. Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : array-like, shape (n_features, n_features) Corrected robust covariance estimate. """ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected def reweight_covariance(self, data): """Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates). [Rouseeuw1984]_ Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : array-like, shape (n_features, ) Re-weighted robust location estimate. covariance_reweighted : array-like, shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : array-like, type boolean, shape (n_samples,) A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. """ n_samples, n_features = data.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = data[mask].mean(0) covariance_reweighted = self._nonrobust_covariance( data[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = data - self.location_ self.dist_ = np.sum( np.dot(X_centered, self.get_precision()) * X_centered, 1) return location_reweighted, covariance_reweighted, support_reweighted
bsd-3-clause
marcocaccin/scikit-learn
examples/hetero_feature_union.py
286
6236
""" ============================================= Feature Union with Heterogeneous Data Sources ============================================= Datasets can often contain components of that require different feature extraction and processing pipelines. This scenario might occur when: 1. Your dataset consists of heterogeneous data types (e.g. raster images and text captions) 2. Your dataset is stored in a Pandas DataFrame and different columns require different processing pipelines. This example demonstrates how to use :class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing different types of features. We use the 20-newsgroups dataset and compute standard bag-of-words features for the subject line and body in separate pipelines as well as ad hoc features on the body. We combine them (with weights) using a FeatureUnion and finally train a classifier on the combined set of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Author: Matt Terry <matt.terry@gmail.com> # # License: BSD 3 clause from __future__ import print_function import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_20newsgroups from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. >> len(data[key]) == n_samples Please note that this is the opposite convention to sklearn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, posts): return [{'length': len(text), 'num_sentences': text.count('.')} for text in posts] class SubjectBodyExtractor(BaseEstimator, TransformerMixin): """Extract the subject & body from a usenet post in a single pass. Takes a sequence of strings and produces a dict of sequences. Keys are `subject` and `body`. 
""" def fit(self, x, y=None): return self def transform(self, posts): features = np.recarray(shape=(len(posts),), dtype=[('subject', object), ('body', object)]) for i, text in enumerate(posts): headers, _, bod = text.partition('\n\n') bod = strip_newsgroup_footer(bod) bod = strip_newsgroup_quoting(bod) features['body'][i] = bod prefix = 'Subject:' sub = '' for line in headers.split('\n'): if line.startswith(prefix): sub = line[len(prefix):] break features['subject'][i] = sub return features pipeline = Pipeline([ # Extract the subject & body ('subjectbody', SubjectBodyExtractor()), # Use FeatureUnion to combine the features from subject and body ('union', FeatureUnion( transformer_list=[ # Pipeline for pulling features from the post's subject line ('subject', Pipeline([ ('selector', ItemSelector(key='subject')), ('tfidf', TfidfVectorizer(min_df=50)), ])), # Pipeline for standard bag-of-words model for body ('body_bow', Pipeline([ ('selector', ItemSelector(key='body')), ('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from post's body ('body_stats', Pipeline([ ('selector', ItemSelector(key='body')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights={ 'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0, }, )), # Use a SVC classifier on the combined features ('svc', SVC(kernel='linear')), ]) # limit the list of categories to make running this exmaple faster. categories = ['alt.atheism', 'talk.religion.misc'] train = fetch_20newsgroups(random_state=1, subset='train', categories=categories, ) test = fetch_20newsgroups(random_state=1, subset='test', categories=categories, ) pipeline.fit(train.data, train.target) y = pipeline.predict(test.data) print(classification_report(y, test.target))
bsd-3-clause
Tjorriemorrie/trading
13_resistancelines/cross-tick.py
1
3413
import logging from pprint import pprint from os.path import realpath, dirname import pandas as pd from sklearn.preprocessing import scale import matplotlib.pyplot as plt currencies = [ 'AUDUSD', 'EURGBP', 'EURJPY', 'EURUSD', 'GBPJPY', 'GBPUSD', 'NZDUSD', 'USDCAD', 'USDCHF', 'USDJPY', ] def main(): lines = [1, 2, 3, 5, 8, 13, 21] results = {} for currency in currencies: logging.info('Currency: {0}'.format(currency)) results[currency] = {} df = loadData(currency) # print df df = dropOutliers(df) # print df df = resistanceLines(df, lines) # print df.tail() for val in lines: high_val = 'high_{0}'.format(val) low_val = 'low_{0}'.format(val) xo_high_val = 'xo_high_{0}'.format(val) xo_low_val = 'xo_low_{0}'.format(val) df[xo_high_val] = df['high'] > df[high_val] df[xo_low_val] = df['low'] < df[low_val] results[currency][high_val] = df[xo_high_val].value_counts()[True] / float(len(df)) results[currency][low_val] = df[xo_low_val].value_counts()[True] / float(len(df)) print df[::len(df)/20] break for currency, data in results.iteritems(): for line, cnt in data.iteritems(): logging.info('{0} {1} {2:.2f}'.format(currency, line, cnt)) def loadData(currency): logging.info('Data: loading {0}...'.format(currency)) df = pd.read_csv( r'{0}/../data/{1}1440.csv'.format(realpath(dirname(__file__)), currency), names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'], parse_dates=[[0, 1]], index_col=0, ).astype(float) logging.info('Data: {0} rows loaded'.format(len(df))) return df def dropOutliers(df): logging.info('Dropping outliers...') size_start = len(df) # get range rolhigh = pd.rolling_max(df['high'], 5) rolmin = pd.rolling_min(df['low'], 5) df['range'] = rolhigh - rolmin df.dropna(inplace=True) # print df # get stats mean = df.range.mean() std = df.range.std() # drop outliers min_cutoff = mean - std * 2 max_cutoff = mean + std * 2 # logging.info('Dropping outliers between below {0:4f} and above {1:4f}'.format(min_cutoff, max_cutoff)) df = df[df['range'] > min_cutoff] df = df[df['range'] < max_cutoff] # logging.info('Dropped {0} rows'.format(500 - len(df))) # get stats # mean = df.range.mean() # std = df.range.std() # logging.info('{0} between {1} and {2} [{3}]'.format( # currency, # round(mean - std, 4), # round(mean + std, 4), # round(mean, 4), # )) logging.info('Outliers: {0} removed'.format(size_start - len(df))) return df def resistanceLines(df, vals): logging.info('Resistance lines...') # print df.tail() for val in vals: df['high_{0}'.format(val)] = pd.rolling_max(df['high'], val).shift(1) df['low_{0}'.format(val)] = pd.rolling_min(df['low'], val).shift(1) # print df.tail() df.dropna(inplace=True) logging.info('Resistance lines') return df if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s', # datefmt='%Y-%m-%d %H:%M:', ) main()
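# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): resistanceLines()
# above shifts a rolling high/low by one bar so each row only sees past
# prices. The helper below repeats that idea on a tiny made-up OHLC frame with
# the same pd.rolling_max/pd.rolling_min calls the script uses; the prices and
# the window of 3 are illustrative only.
def _resistance_demo():
    demo = pd.DataFrame({
        'high': [1.10, 1.12, 1.11, 1.15, 1.14, 1.16],
        'low':  [1.08, 1.09, 1.07, 1.11, 1.10, 1.12],
    })
    demo['high_3'] = pd.rolling_max(demo['high'], 3).shift(1)
    demo['low_3'] = pd.rolling_min(demo['low'], 3).shift(1)
    demo['xo_high_3'] = demo['high'] > demo['high_3']
    demo['xo_low_3'] = demo['low'] < demo['low_3']
    return demo.dropna()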
mit
brianjianhuang/cnn-text-classification-tf
data_helpers.py
2
4325
import numpy as np import re import itertools from collections import Counter def clean_str(string): """ Tokenization/string cleaning for all datasets except for SST. Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py """ string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower() def load_data_and_labels(): """ Loads MR polarity data from files, splits the data into words and generates labels. Returns split sentences and labels. """ # Load data from files positive_examples = list(open("./data/rt-polaritydata/rt-polarity.pos", "r").readlines()) positive_examples = [s.strip() for s in positive_examples] negative_examples = list(open("./data/rt-polaritydata/rt-polarity.neg", "r").readlines()) negative_examples = [s.strip() for s in negative_examples] # Split by words x_text = positive_examples + negative_examples x_text = [clean_str(sent) for sent in x_text] x_text = [s.split(" ") for s in x_text] # Generate labels positive_labels = [[0, 1] for _ in positive_examples] negative_labels = [[1, 0] for _ in negative_examples] y = np.concatenate([positive_labels, negative_labels], 0) return [x_text, y] def pad_sentences(sentences, padding_word="<PAD/>"): """ Pads all sentences to the same length. The length is defined by the longest sentence. Returns padded sentences. """ sequence_length = max(len(x) for x in sentences) padded_sentences = [] for i in range(len(sentences)): sentence = sentences[i] num_padding = sequence_length - len(sentence) new_sentence = sentence + [padding_word] * num_padding padded_sentences.append(new_sentence) return padded_sentences def build_vocab(sentences): """ Builds a vocabulary mapping from word to index based on the sentences. Returns vocabulary mapping and inverse vocabulary mapping. """ # Build vocabulary word_counts = Counter(itertools.chain(*sentences)) # Mapping from index to word vocabulary_inv = [x[0] for x in word_counts.most_common()] vocabulary_inv = list(sorted(vocabulary_inv)) # Mapping from word to index vocabulary = {x: i for i, x in enumerate(vocabulary_inv)} return [vocabulary, vocabulary_inv] def build_input_data(sentences, labels, vocabulary): """ Maps sentencs and labels to vectors based on a vocabulary. """ x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences]) y = np.array(labels) return [x, y] def load_data(): """ Loads and preprocessed data for the MR dataset. Returns input vectors, labels, vocabulary, and inverse vocabulary. """ # Load and preprocess data sentences, labels = load_data_and_labels() sentences_padded = pad_sentences(sentences) vocabulary, vocabulary_inv = build_vocab(sentences_padded) x, y = build_input_data(sentences_padded, labels, vocabulary) return [x, y, vocabulary, vocabulary_inv] def batch_iter(data, batch_size, num_epochs, shuffle=True): """ Generates a batch iterator for a dataset. 
""" data = np.array(data) data_size = len(data) num_batches_per_epoch = int(len(data)/batch_size) + 1 for epoch in range(num_epochs): # Shuffle the data at each epoch if shuffle: shuffle_indices = np.random.permutation(np.arange(data_size)) shuffled_data = data[shuffle_indices] else: shuffled_data = data for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, data_size) yield shuffled_data[start_index:end_index]
apache-2.0
thomasgilgenast/spqr-nonrel
djangoappengine/db/compiler.py
48
22510
from .db_settings import get_model_indexes from .utils import commit_locked from .expressions import ExpressionEvaluator import datetime import sys from django.db.models.sql import aggregates as sqlaggregates from django.db.models.sql.constants import LOOKUP_SEP, MULTI, SINGLE from django.db.models.sql.where import AND, OR from django.db.utils import DatabaseError, IntegrityError from django.utils.tree import Node from functools import wraps from google.appengine.api.datastore import Entity, Query, MultiQuery, \ Put, Get, Delete, Key from google.appengine.api.datastore_errors import Error as GAEError from google.appengine.api.datastore_types import Text, Category, Email, Link, \ PhoneNumber, PostalAddress, Text, Blob, ByteString, GeoPt, IM, Key, \ Rating, BlobKey from djangotoolbox.db.basecompiler import NonrelQuery, NonrelCompiler, \ NonrelInsertCompiler, NonrelUpdateCompiler, NonrelDeleteCompiler import cPickle as pickle import decimal # Valid query types (a dictionary is used for speedy lookups). OPERATORS_MAP = { 'exact': '=', 'gt': '>', 'gte': '>=', 'lt': '<', 'lte': '<=', # The following operators are supported with special code below: 'isnull': None, 'in': None, 'startswith': None, 'range': None, 'year': None, } NEGATION_MAP = { 'gt': '<=', 'gte': '<', 'lt': '>=', 'lte': '>', # TODO: support these filters #'exact': '!=', # this might actually become individual '<' and '>' queries } NOT_PROVIDED = object() def safe_call(func): @wraps(func) def _func(*args, **kwargs): try: return func(*args, **kwargs) except GAEError, e: raise DatabaseError, DatabaseError(str(e)), sys.exc_info()[2] return _func class GAEQuery(NonrelQuery): # ---------------------------------------------- # Public API # ---------------------------------------------- def __init__(self, compiler, fields): super(GAEQuery, self).__init__(compiler, fields) self.inequality_field = None self.pk_filters = None self.excluded_pks = () self.has_negated_exact_filter = False self.ordering = () self.gae_ordering = [] pks_only = False if len(fields) == 1 and fields[0].primary_key: pks_only = True self.db_table = self.query.get_meta().db_table self.pks_only = pks_only start_cursor = getattr(self.query, '_gae_start_cursor', None) end_cursor = getattr(self.query, '_gae_end_cursor', None) self.gae_query = [Query(self.db_table, keys_only=self.pks_only, cursor=start_cursor, end_cursor=end_cursor)] # This is needed for debugging def __repr__(self): return '<GAEQuery: %r ORDER %r>' % (self.gae_query, self.ordering) @safe_call def fetch(self, low_mark, high_mark): query = self._build_query() executed = False if self.excluded_pks and high_mark is not None: high_mark += len(self.excluded_pks) if self.pk_filters is not None: results = self.get_matching_pk(low_mark, high_mark) else: if high_mark is None: kw = {} if low_mark: kw['offset'] = low_mark results = query.Run(**kw) executed = True elif high_mark > low_mark: results = query.Get(high_mark - low_mark, low_mark) executed = True else: results = () for entity in results: if isinstance(entity, Key): key = entity else: key = entity.key() if key in self.excluded_pks: continue yield self._make_entity(entity) if executed and not isinstance(query, MultiQuery): try: self.query._gae_cursor = query.GetCompiledCursor() except: pass @safe_call def count(self, limit=NOT_PROVIDED): if self.pk_filters is not None: return len(self.get_matching_pk(0, limit)) if self.excluded_pks: return len(list(self.fetch(0, 2000))) # The datastore's Count() method has a 'limit' kwarg, which has # a default value 
(obviously). This value can be overridden to anything # you like, and importantly can be overridden to unlimited by passing # a value of None. Hence *this* method has a default value of # NOT_PROVIDED, rather than a default value of None kw = {} if limit is not NOT_PROVIDED: kw['limit'] = limit return self._build_query().Count(**kw) @safe_call def delete(self): if self.pk_filters is not None: keys = [key for key in self.pk_filters if key is not None] else: keys = self.fetch() if keys: Delete(keys) @safe_call def order_by(self, ordering): self.ordering = ordering for order in self.ordering: if order.startswith('-'): order, direction = order[1:], Query.DESCENDING else: direction = Query.ASCENDING if order == self.query.get_meta().pk.column: order = '__key__' self.gae_ordering.append((order, direction)) # This function is used by the default add_filters() implementation @safe_call def add_filter(self, column, lookup_type, negated, db_type, value): if value in ([], ()): self.pk_filters = [] return # Emulated/converted lookups if column == self.query.get_meta().pk.column: column = '__key__' db_table = self.query.get_meta().db_table if lookup_type in ('exact', 'in'): # Optimization: batch-get by key if self.pk_filters is not None: raise DatabaseError("You can't apply multiple AND filters " "on the primary key. " "Did you mean __in=[...]?") if not isinstance(value, (tuple, list)): value = [value] pks = [create_key(db_table, pk) for pk in value if pk] if negated: self.excluded_pks = pks else: self.pk_filters = pks return else: # XXX: set db_type to 'gae_key' in order to allow # convert_value_for_db to recognize the value to be a Key and # not a str. Otherwise the key would be converted back to a # unicode (see convert_value_for_db) db_type = 'gae_key' key_type_error = 'Lookup values on primary keys have to be' \ 'a string or an integer.' if lookup_type == 'range': if isinstance(value, (list, tuple)) and not ( isinstance(value[0], (basestring, int, long)) and isinstance(value[1], (basestring, int, long))): raise DatabaseError(key_type_error) elif not isinstance(value, (basestring, int, long)): raise DatabaseError(key_type_error) # for lookup type range we have to deal with a list if lookup_type == 'range': value[0] = create_key(db_table, value[0]) value[1] = create_key(db_table, value[1]) else: value = create_key(db_table, value) if lookup_type not in OPERATORS_MAP: raise DatabaseError("Lookup type %r isn't supported" % lookup_type) # We check for negation after lookup_type isnull because it # simplifies the code. All following lookup_type checks assume # that they're not negated. if lookup_type == 'isnull': if (negated and value) or not value: # TODO/XXX: is everything greater than None? 
op = '>' else: op = '=' value = None elif negated and lookup_type == 'exact': if self.has_negated_exact_filter: raise DatabaseError("You can't exclude more than one __exact " "filter") self.has_negated_exact_filter = True self._combine_filters(column, db_type, (('<', value), ('>', value))) return elif negated: try: op = NEGATION_MAP[lookup_type] except KeyError: raise DatabaseError("Lookup type %r can't be negated" % lookup_type) if self.inequality_field and column != self.inequality_field: raise DatabaseError("Can't have inequality filters on multiple " "columns (here: %r and %r)" % (self.inequality_field, column)) self.inequality_field = column elif lookup_type == 'in': # Create sub-query combinations, one for each value if len(self.gae_query) * len(value) > 30: raise DatabaseError("You can't query against more than " "30 __in filter value combinations") op_values = [('=', v) for v in value] self._combine_filters(column, db_type, op_values) return elif lookup_type == 'startswith': self._add_filter(column, '>=', db_type, value) if isinstance(value, str): value = value.decode('utf8') if isinstance(value, Key): value = list(value.to_path()) if isinstance(value[-1], str): value[-1] = value[-1].decode('utf8') value[-1] += u'\ufffd' value = Key.from_path(*value) else: value += u'\ufffd' self._add_filter(column, '<=', db_type, value) return elif lookup_type in ('range', 'year'): self._add_filter(column, '>=', db_type, value[0]) op = '<=' if lookup_type == 'range' else '<' self._add_filter(column, op, db_type, value[1]) return else: op = OPERATORS_MAP[lookup_type] self._add_filter(column, op, db_type, value) # ---------------------------------------------- # Internal API # ---------------------------------------------- def _add_filter(self, column, op, db_type, value): for query in self.gae_query: key = '%s %s' % (column, op) value = self.convert_value_for_db(db_type, value) if isinstance(value, Text): raise DatabaseError('TextField is not indexed, by default, ' "so you can't filter on it. 
Please add " 'an index definition for the column %s ' 'on the model %s.%s as described here:\n' 'http://www.allbuttonspressed.com/blog/django/2010/07/Managing-per-field-indexes-on-App-Engine' % (column, self.query.model.__module__, self.query.model.__name__)) if key in query: existing_value = query[key] if isinstance(existing_value, list): existing_value.append(value) else: query[key] = [existing_value, value] else: query[key] = value def _combine_filters(self, column, db_type, op_values): gae_query = self.gae_query combined = [] for query in gae_query: for op, value in op_values: self.gae_query = [Query(self.db_table, keys_only=self.pks_only)] self.gae_query[0].update(query) self._add_filter(column, op, db_type, value) combined.append(self.gae_query[0]) self.gae_query = combined def _make_entity(self, entity): if isinstance(entity, Key): key = entity entity = {} else: key = entity.key() entity[self.query.get_meta().pk.column] = key return entity @safe_call def _build_query(self): for query in self.gae_query: query.Order(*self.gae_ordering) if len(self.gae_query) > 1: return MultiQuery(self.gae_query, self.gae_ordering) return self.gae_query[0] def get_matching_pk(self, low_mark=0, high_mark=None): if not self.pk_filters: return [] results = [result for result in Get(self.pk_filters) if result is not None and self.matches_filters(result)] if self.ordering: results.sort(cmp=self.order_pk_filtered) if high_mark is not None and high_mark < len(results) - 1: results = results[:high_mark] if low_mark: results = results[low_mark:] return results def order_pk_filtered(self, lhs, rhs): left = dict(lhs) left[self.query.get_meta().pk.column] = lhs.key().to_path() right = dict(rhs) right[self.query.get_meta().pk.column] = rhs.key().to_path() return self._order_in_memory(left, right) def matches_filters(self, entity): item = dict(entity) pk = self.query.get_meta().pk value = self.convert_value_from_db(pk.db_type(connection=self.connection), entity.key()) item[pk.column] = value result = self._matches_filters(item, self.query.where) return result class SQLCompiler(NonrelCompiler): """ A simple App Engine query: no joins, no distinct, etc. 
""" query_class = GAEQuery def convert_value_from_db(self, db_type, value): if isinstance(value, (list, tuple, set)) and \ db_type.startswith(('ListField:', 'SetField:')): db_sub_type = db_type.split(':', 1)[1] value = [self.convert_value_from_db(db_sub_type, subvalue) for subvalue in value] if db_type.startswith('SetField:') and value is not None: value = set(value) if db_type.startswith('DictField:') and value is not None: value = pickle.loads(value) if ':' in db_type: db_sub_type = db_type.split(':', 1)[1] value = dict((key, self.convert_value_from_db(db_sub_type, value[key])) for key in value) # the following GAE database types are all unicode subclasses, cast them # to unicode so they appear like pure unicode instances for django if isinstance(value, basestring) and value and db_type.startswith('decimal'): value = decimal.Decimal(value) elif isinstance(value, (Category, Email, Link, PhoneNumber, PostalAddress, Text, unicode)): value = unicode(value) elif isinstance(value, Blob): value = str(value) elif isinstance(value, str): # always retrieve strings as unicode (it is possible that old datasets # contain non unicode strings, nevertheless work with unicode ones) value = value.decode('utf-8') elif isinstance(value, Key): # for now we do not support KeyFields thus a Key has to be the own # primary key # TODO: GAE: support parents via GAEKeyField assert value.parent() is None, "Parents are not yet supported!" if db_type == 'integer': if value.id() is None: raise DatabaseError('Wrong type for Key. Expected integer, found' 'None') else: value = value.id() elif db_type == 'text': if value.name() is None: raise DatabaseError('Wrong type for Key. Expected string, found' 'None') else: value = value.name() else: raise DatabaseError("%s fields cannot be keys on GAE" % db_type) elif db_type == 'date' and isinstance(value, datetime.datetime): value = value.date() elif db_type == 'time' and isinstance(value, datetime.datetime): value = value.time() return value def convert_value_for_db(self, db_type, value): if isinstance(value, unicode): value = unicode(value) elif isinstance(value, str): value = str(value) elif isinstance(value, (list, tuple, set)) and \ db_type.startswith(('ListField:', 'SetField:')): db_sub_type = db_type.split(':', 1)[1] value = [self.convert_value_for_db(db_sub_type, subvalue) for subvalue in value] elif isinstance(value, decimal.Decimal) and db_type.startswith("decimal:"): value = self.connection.ops.value_to_db_decimal(value, *eval(db_type[8:])) elif isinstance(value, dict) and db_type.startswith('DictField:'): if ':' in db_type: db_sub_type = db_type.split(':', 1)[1] value = dict([(key, self.convert_value_for_db(db_sub_type, value[key])) for key in value]) value = Blob(pickle.dumps(value)) if db_type == 'gae_key': return value elif db_type == 'longtext': # long text fields cannot be indexed on GAE so use GAE's database # type Text if value is not None: value = Text(value.decode('utf-8') if isinstance(value, str) else value) elif db_type == 'text': value = value.decode('utf-8') if isinstance(value, str) else value elif db_type == 'blob': if value is not None: value = Blob(value) elif type(value) is str: # always store unicode strings value = value.decode('utf-8') elif db_type == 'date' or db_type == 'time' or db_type == 'datetime': # here we have to check the db_type because GAE always stores datetimes value = to_datetime(value) return value class SQLInsertCompiler(NonrelInsertCompiler, SQLCompiler): @safe_call def insert(self, data, return_id=False): gae_data = {} opts = 
self.query.get_meta() unindexed_fields = get_model_indexes(self.query.model)['unindexed'] unindexed_cols = [opts.get_field(name).column for name in unindexed_fields] kwds = {'unindexed_properties': unindexed_cols} for column, value in data.items(): if column == opts.pk.column: if isinstance(value, basestring): kwds['name'] = value else: kwds['id'] = value elif isinstance(value, (tuple, list)) and not len(value): # gae does not store emty lists (and even does not allow passing empty # lists to Entity.update) so skip them continue else: gae_data[column] = value entity = Entity(self.query.get_meta().db_table, **kwds) entity.update(gae_data) key = Put(entity) return key.id_or_name() class SQLUpdateCompiler(NonrelUpdateCompiler, SQLCompiler): def execute_sql(self, result_type=MULTI): # modify query to fetch pks only and then execute the query # to get all pks pk = self.query.model._meta.pk.name self.query.add_immediate_loading([pk]) pks = [row for row in self.results_iter()] self.update_entities(pks) return len(pks) def update_entities(self, pks): for pk in pks: self.update_entity(pk[0]) @commit_locked def update_entity(self, pk): gae_query = self.build_query() key = create_key(self.query.get_meta().db_table, pk) entity = Get(key) if not gae_query.matches_filters(entity): return qn = self.quote_name_unless_alias update_dict = {} for field, o, value in self.query.values: if hasattr(value, 'prepare_database_save'): value = value.prepare_database_save(field) else: value = field.get_db_prep_save(value, connection=self.connection) if hasattr(value, "evaluate"): assert not value.negated assert not value.subtree_parents value = ExpressionEvaluator(value, self.query, entity, allow_joins=False) if hasattr(value, 'as_sql'): # evaluate expression and return the new value val = value.as_sql(qn, self.connection) update_dict[field] = val else: update_dict[field] = value for field, value in update_dict.iteritems(): db_type = field.db_type(connection=self.connection) entity[qn(field.column)] = self.convert_value_for_db(db_type, value) key = Put(entity) class SQLDeleteCompiler(NonrelDeleteCompiler, SQLCompiler): pass def to_datetime(value): """Convert a time or date to a datetime for datastore storage. Args: value: A datetime.time, datetime.date or string object. Returns: A datetime object with date set to 1970-01-01 if value is a datetime.time A datetime object with date set to value.year - value.month - value.day and time set to 0:00 if value is a datetime.date """ if value is None: return value elif isinstance(value, datetime.datetime): return value elif isinstance(value, datetime.date): return datetime.datetime(value.year, value.month, value.day) elif isinstance(value, datetime.time): return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond) def create_key(db_table, value): if isinstance(value, (int, long)) and value < 1: return None return Key.from_path(db_table, value)
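# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): to_datetime() above
# normalises date and time values before they are written to the datastore,
# which only stores datetimes. The check below uses made-up values and only
# exercises the pure-datetime branches, so it needs no App Engine services;
# create_key() is left out because it requires a real datastore Key.
def _to_datetime_demo():
    assert to_datetime(None) is None
    assert to_datetime(datetime.date(2012, 3, 4)) == \
        datetime.datetime(2012, 3, 4)
    assert to_datetime(datetime.time(5, 6, 7)) == \
        datetime.datetime(1970, 1, 1, 5, 6, 7)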
bsd-3-clause
Project-MONAI/MONAI
monai/transforms/transform.py
1
20419
# Copyright (c) MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A collection of generic interfaces for MONAI transforms. """ import logging from abc import ABC, abstractmethod from typing import Any, Callable, Dict, Generator, Hashable, Iterable, List, Mapping, Optional, Tuple, TypeVar, Union import numpy as np import torch from monai import config, transforms from monai.config import KeysCollection from monai.data.meta_tensor import MetaTensor from monai.utils import MAX_SEED, ensure_tuple, first from monai.utils.enums import TransformBackends from monai.utils.misc import MONAIEnvVars __all__ = [ "ThreadUnsafe", "apply_transform", "LazyTrait", "RandomizableTrait", "MultiSampleTrait", "Randomizable", "LazyTransform", "RandomizableTransform", "Transform", "MapTransform", ] ReturnType = TypeVar("ReturnType") def _apply_transform( transform: Callable[..., ReturnType], parameters: Any, unpack_parameters: bool = False ) -> ReturnType: """ Perform transformation `transform` with the provided parameters `parameters`. If `parameters` is a tuple and `unpack_items` is True, each parameter of `parameters` is unpacked as arguments to `transform`. Otherwise `parameters` is considered as single argument to `transform`. Args: transform: a callable to be used to transform `data`. parameters: parameters for the `transform`. unpack_parameters: whether to unpack parameters for `transform`. Defaults to False. Returns: ReturnType: The return type of `transform`. """ if isinstance(parameters, tuple) and unpack_parameters: return transform(*parameters) return transform(parameters) def apply_transform( transform: Callable[..., ReturnType], data: Any, map_items: bool = True, unpack_items: bool = False, log_stats: bool = False, ) -> Union[List[ReturnType], ReturnType]: """ Transform `data` with `transform`. If `data` is a list or tuple and `map_data` is True, each item of `data` will be transformed and this method returns a list of outcomes. otherwise transform will be applied once with `data` as the argument. Args: transform: a callable to be used to transform `data`. data: an object to be transformed. map_items: whether to apply transform to each item in `data`, if `data` is a list or tuple. Defaults to True. unpack_items: whether to unpack parameters using `*`. Defaults to False. log_stats: whether to log the detailed information of data and applied transform when error happened, for NumPy array and PyTorch Tensor, log the data shape and value range, for other metadata, log the values directly. default to `False`. Raises: Exception: When ``transform`` raises an exception. Returns: Union[List[ReturnType], ReturnType]: The return type of `transform` or a list thereof. """ try: if isinstance(data, (list, tuple)) and map_items: return [_apply_transform(transform, item, unpack_items) for item in data] return _apply_transform(transform, data, unpack_items) except Exception as e: # if in debug mode, don't swallow exception so that the breakpoint # appears where the exception was raised. 
if MONAIEnvVars.debug(): raise if log_stats and not isinstance(transform, transforms.compose.Compose): # log the input data information of exact transform in the transform chain datastats = transforms.utility.array.DataStats(data_shape=False, value_range=False) logger = logging.getLogger(datastats._logger_name) logger.info(f"\n=== Transform input info -- {type(transform).__name__} ===") if isinstance(data, (list, tuple)): data = data[0] def _log_stats(data, prefix: Optional[str] = "Data"): if isinstance(data, (np.ndarray, torch.Tensor)): # log data type, shape, range for array datastats(img=data, data_shape=True, value_range=True, prefix=prefix) else: # log data type and value for other metadata datastats(img=data, data_value=True, prefix=prefix) if isinstance(data, dict): for k, v in data.items(): _log_stats(data=v, prefix=k) else: _log_stats(data=data) raise RuntimeError(f"applying transform {transform}") from e class LazyTrait: """ An interface to indicate that the transform has the capability to execute using MONAI's lazy resampling feature. In order to do this, the implementing class needs to be able to describe its operation as an affine matrix or grid with accompanying metadata. This interface can be extended from by people adapting transforms to the MONAI framework as well as by implementors of MONAI transforms. """ @property def lazy_evaluation(self): """ Get whether lazy_evaluation is enabled for this transform instance. Returns: True if the transform is operating in a lazy fashion, False if not. """ raise NotImplementedError() @lazy_evaluation.setter def lazy_evaluation(self, enabled: bool): """ Set whether lazy_evaluation is enabled for this transform instance. Args: enabled: True if the transform should operate in a lazy fashion, False if not. """ raise NotImplementedError() class RandomizableTrait: """ An interface to indicate that the transform has the capability to perform randomized transforms to the data that it is called upon. This interface can be extended from by people adapting transforms to the MONAI framework as well as by implementors of MONAI transforms. """ pass class MultiSampleTrait: """ An interface to indicate that the transform has the capability to return multiple samples given an input, such as when performing random crops of a sample. This interface can be extended from by people adapting transforms to the MONAI framework as well as by implementors of MONAI transforms. """ pass class ThreadUnsafe: """ A class to denote that the transform will mutate its member variables, when being applied. Transforms inheriting this class should be used cautiously in a multi-thread context. This type is typically used by :py:class:`monai.data.CacheDataset` and its extensions, where the transform cache is built with multiple threads. """ pass class Randomizable(ThreadUnsafe): """ An interface for handling random state locally, currently based on a class variable `R`, which is an instance of `np.random.RandomState`. This provides the flexibility of component-specific determinism without affecting the global states. It is recommended to use this API with :py:class:`monai.data.DataLoader` for deterministic behaviour of the preprocessing pipelines. This API is not thread-safe. Additionally, deepcopying instance of this class often causes insufficient randomness as the random states will be duplicated. 
""" R: np.random.RandomState = np.random.RandomState() def set_random_state( self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None ) -> "Randomizable": """ Set the random state locally, to control the randomness, the derived classes should use :py:attr:`self.R` instead of `np.random` to introduce random factors. Args: seed: set the random state with an integer seed. state: set the random state with a `np.random.RandomState` object. Raises: TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``. Returns: a Randomizable instance. """ if seed is not None: _seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed _seed = _seed % MAX_SEED self.R = np.random.RandomState(_seed) return self if state is not None: if not isinstance(state, np.random.RandomState): raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.") self.R = state return self self.R = np.random.RandomState() return self def randomize(self, data: Any) -> None: """ Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. all :py:attr:`self.R` calls happen here so that we have a better chance to identify errors of sync the random state. This method can generate the random factors based on properties of the input data. Raises: NotImplementedError: When the subclass does not override this method. """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") class Transform(ABC): """ An abstract class of a ``Transform``. A transform is callable that processes ``data``. It could be stateful and may modify ``data`` in place, the implementation should be aware of: #. thread safety when mutating its own states. When used from a multi-process context, transform's instance variables are read-only. thread-unsafe transforms should inherit :py:class:`monai.transforms.ThreadUnsafe`. #. ``data`` content unused by this transform may still be used in the subsequent transforms in a composed transform. #. storing too much information in ``data`` may cause some memory issue or IPC sync issue, especially in the multi-processing environment of PyTorch DataLoader. See Also :py:class:`monai.transforms.Compose` """ # Transforms should add `monai.transforms.utils.TransformBackends` to this list if they are performing # the data processing using the corresponding backend APIs. # Most of MONAI transform's inputs and outputs will be converted into torch.Tensor or monai.data.MetaTensor. # This variable provides information about whether the input will be converted # to other data types during the transformation. Note that not all `dtype` (such as float32, uint8) are supported # by all the data types, the `dtype` during the conversion is determined automatically by each transform, # please refer to the transform's docstring. backend: List[TransformBackends] = [] @abstractmethod def __call__(self, data: Any): """ ``data`` is an element which often comes from an iteration over an iterable, such as :py:class:`torch.utils.data.Dataset`. This method should return an updated version of ``data``. To simplify the input validations, most of the transforms assume that - ``data`` is a Numpy ndarray, PyTorch Tensor or string, - the data shape can be: #. string data without shape, `LoadImage` transform expects file paths, #. 
most of the pre-/post-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, except for example: `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels), - the channel dimension is often not omitted even if number of channels is one. This method can optionally take additional arguments to help execute transformation operation. Raises: NotImplementedError: When the subclass does not override this method. """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") class LazyTransform(Transform, LazyTrait): """ An implementation of functionality for lazy transforms that can be subclassed by array and dictionary transforms to simplify implementation of new lazy transforms. """ def __init__(self, lazy_evaluation: Optional[bool] = True): self.lazy_evaluation = lazy_evaluation @property def lazy_evaluation(self): return self.lazy_evaluation @lazy_evaluation.setter def lazy_evaluation(self, lazy_evaluation: bool): if not isinstance(lazy_evaluation, bool): raise TypeError("'lazy_evaluation must be a bool but is of " f"type {type(lazy_evaluation)}'") self.lazy_evaluation = lazy_evaluation class RandomizableTransform(Randomizable, Transform, RandomizableTrait): """ An interface for handling random state locally, currently based on a class variable `R`, which is an instance of `np.random.RandomState`. This class introduces a randomized flag `_do_transform`, is mainly for randomized data augmentation transforms. For example: .. code-block:: python from monai.transforms import RandomizableTransform class RandShiftIntensity100(RandomizableTransform): def randomize(self): super().randomize(None) self._offset = self.R.uniform(low=0, high=100) def __call__(self, img): self.randomize() if not self._do_transform: return img return img + self._offset transform = RandShiftIntensity() transform.set_random_state(seed=0) print(transform(10)) """ def __init__(self, prob: float = 1.0, do_transform: bool = True): self._do_transform = do_transform self.prob = min(max(prob, 0.0), 1.0) def randomize(self, data: Any) -> None: """ Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. all :py:attr:`self.R` calls happen here so that we have a better chance to identify errors of sync the random state. This method can generate the random factors based on properties of the input data. """ self._do_transform = self.R.rand() < self.prob class MapTransform(Transform): """ A subclass of :py:class:`monai.transforms.Transform` with an assumption that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``. The ``keys`` parameter will be used to get and set the actual data item to transform. That is, the callable of this transform should follow the pattern: .. code-block:: python def __call__(self, data): for key in self.keys: if key in data: # update output data with some_transform_function(data[key]). else: # raise exception unless allow_missing_keys==True. return data Raises: ValueError: When ``keys`` is an empty iterable. TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``. 
""" def __new__(cls, *args, **kwargs): if config.USE_META_DICT: # call_update after MapTransform.__call__ cls.__call__ = transforms.attach_hook(cls.__call__, MapTransform.call_update, "post") if hasattr(cls, "inverse"): # inverse_update before InvertibleTransform.inverse cls.inverse = transforms.attach_hook(cls.inverse, transforms.InvertibleTransform.inverse_update) return Transform.__new__(cls) def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: self.keys: Tuple[Hashable, ...] = ensure_tuple(keys) self.allow_missing_keys = allow_missing_keys if not self.keys: raise ValueError("keys must be non empty.") for key in self.keys: if not isinstance(key, Hashable): raise TypeError(f"keys must be one of (Hashable, Iterable[Hashable]) but is {type(keys).__name__}.") def call_update(self, data): """ This function is to be called after every `self.__call__(data)`, update `data[key_transforms]` and `data[key_meta_dict]` using the content from MetaTensor `data[key]`, for MetaTensor backward compatibility 0.9.0. """ if not isinstance(data, (list, tuple, Mapping)): return data is_dict = False if isinstance(data, Mapping): data, is_dict = [data], True if not data or not isinstance(data[0], Mapping): return data[0] if is_dict else data list_d = [dict(x) for x in data] # list of dict for crop samples for idx, dict_i in enumerate(list_d): for k in dict_i: if not isinstance(dict_i[k], MetaTensor): continue list_d[idx] = transforms.sync_meta_info(k, dict_i, t=not isinstance(self, transforms.InvertD)) return list_d[0] if is_dict else list_d @abstractmethod def __call__(self, data): """ ``data`` often comes from an iteration over an iterable, such as :py:class:`torch.utils.data.Dataset`. To simplify the input validations, this method assumes: - ``data`` is a Python dictionary, - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element of ``self.keys``, the data shape can be: #. string data without shape, `LoadImaged` transform expects file paths, #. most of the pre-/post-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, except for example: `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels) - the channel dimension is often not omitted even if number of channels is one. Raises: NotImplementedError: When the subclass does not override this method. returns: An updated dictionary version of ``data`` by applying the transform. """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") def key_iterator(self, data: Mapping[Hashable, Any], *extra_iterables: Optional[Iterable]) -> Generator: """ Iterate across keys and optionally extra iterables. If key is missing, exception is raised if `allow_missing_keys==False` (default). If `allow_missing_keys==True`, key is skipped. 
Args: data: data that the transform will be applied to extra_iterables: anything else to be iterated through """ # if no extra iterables given, create a dummy list of Nones ex_iters = extra_iterables or [[None] * len(self.keys)] # loop over keys and any extra iterables _ex_iters: List[Any] for key, *_ex_iters in zip(self.keys, *ex_iters): # all normal, yield (what we yield depends on whether extra iterables were given) if key in data: yield (key,) + tuple(_ex_iters) if extra_iterables else key elif not self.allow_missing_keys: raise KeyError( f"Key `{key}` of transform `{self.__class__.__name__}` was missing in the data" " and allow_missing_keys==False." ) def first_key(self, data: Dict[Hashable, Any]): """ Get the first available key of `self.keys` in the input `data` dictionary. If no available key, return an empty tuple `()`. Args: data: data that the transform will be applied to. """ return first(self.key_iterator(data), ())
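# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a tiny dictionary
# transform built on MapTransform above, walking the requested keys with
# key_iterator() exactly as the MapTransform docstring describes. The class
# name ``AddOffsetd`` and the ``offset`` argument are hypothetical, chosen
# only to illustrate the pattern.
class AddOffsetd(MapTransform):
    def __init__(self, keys: KeysCollection, offset: float = 1.0,
                 allow_missing_keys: bool = False) -> None:
        super().__init__(keys, allow_missing_keys)
        self.offset = offset

    def __call__(self, data):
        d = dict(data)
        for key in self.key_iterator(d):
            d[key] = d[key] + self.offset
        return d


# e.g. AddOffsetd(keys=["img"], offset=10.0)({"img": torch.ones(1, 2, 2)})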
apache-2.0
Garrett-R/scikit-learn
sklearn/cross_validation.py
2
57631
""" The :mod:`sklearn.cross_validation` module includes utilities for cross- validation and performance evaluation. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings from itertools import chain, combinations from math import ceil, floor, factorial import numbers import time from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import is_classifier, clone from .utils import indexable, check_random_state, safe_indexing from .utils.validation import _num_samples, check_array from .utils.multiclass import type_of_target from .externals.joblib import Parallel, delayed, logger from .externals.six import with_metaclass from .externals.six.moves import zip from .metrics.scorer import check_scoring __all__ = ['Bootstrap', 'KFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'check_cv', 'cross_val_score', 'permutation_test_score', 'train_test_split'] class _PartitionIterator(with_metaclass(ABCMeta)): """Base class for CV iterators where train_mask = ~test_mask Implementations must define `_iter_test_masks` or `_iter_test_indices`. Parameters ---------- n : int Total number of elements in dataset. """ def __init__(self, n, indices=None): if indices is None: indices = True else: warnings.warn("The indices parameter is deprecated and will be " "removed (assumed True) in 0.17", DeprecationWarning, stacklevel=1) if abs(n - int(n)) >= np.finfo('f').eps: raise ValueError("n must be an integer") self.n = int(n) self._indices = indices @property def indices(self): warnings.warn("The indices attribute is deprecated and will be " "removed (assumed True) in 0.17", DeprecationWarning, stacklevel=1) return self._indices def __iter__(self): indices = self._indices if indices: ind = np.arange(self.n) for test_index in self._iter_test_masks(): train_index = np.logical_not(test_index) if indices: train_index = ind[train_index] test_index = ind[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices() """ for test_index in self._iter_test_indices(): test_mask = self._empty_mask() test_mask[test_index] = True yield test_mask def _iter_test_indices(self): """Generates integer indices corresponding to test sets.""" raise NotImplementedError def _empty_mask(self): return np.zeros(self.n, dtype=np.bool) class LeaveOneOut(_PartitionIterator): """Leave-One-Out cross validation iterator. Provides train/test indices to split data in train test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``. Due to the high number of test sets (which is the same as the number of samples) this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Parameters ---------- n : int Total number of elements in dataset. 
Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = cross_validation.LeaveOneOut(2) >>> len(loo) 2 >>> print(loo) sklearn.cross_validation.LeaveOneOut(n=2) >>> for train_index, test_index in loo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def _iter_test_indices(self): return range(self.n) def __repr__(self): return '%s.%s(n=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, ) def __len__(self): return self.n class LeavePOut(_PartitionIterator): """Leave-P-Out cross validation iterator Provides train/test indices to split data in train test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Parameters ---------- n : int Total number of elements in dataset. p : int Size of the test sets. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = cross_validation.LeavePOut(4, 2) >>> len(lpo) 6 >>> print(lpo) sklearn.cross_validation.LeavePOut(n=4, p=2) >>> for train_index, test_index in lpo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, n, p, indices=None): super(LeavePOut, self).__init__(n, indices) self.p = p def _iter_test_indices(self): for comb in combinations(range(self.n), self.p): yield np.array(comb) def __repr__(self): return '%s.%s(n=%i, p=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.p, ) def __len__(self): return int(factorial(self.n) / factorial(self.n - self.p) / factorial(self.p)) class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)): """Base class to validate KFold approaches""" @abstractmethod def __init__(self, n, n_folds, indices, shuffle, random_state): super(_BaseKFold, self).__init__(n, indices) if abs(n_folds - int(n_folds)) >= np.finfo('f').eps: raise ValueError("n_folds must be an integer") self.n_folds = n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross validation requires at least one" " train / test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if n_folds > self.n: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(n_folds, n)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.shuffle = shuffle self.random_state = random_state class KFold(_BaseKFold): """K-Folds cross validation iterator. 
Provides train/test indices to split data in train test sets. Split dataset into k consecutive folds (without shuffling). Each fold is then used a validation set once while the k - 1 remaining fold form the training set. Parameters ---------- n : int Total number of elements. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle the data before splitting into batches. random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = cross_validation.KFold(4, n_folds=2) >>> len(kf) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in kf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first n % n_folds folds have size n // n_folds + 1, other folds have size n // n_folds. See also -------- StratifiedKFold: take label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). """ def __init__(self, n, n_folds=3, indices=None, shuffle=False, random_state=None): super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state) self.idxs = np.arange(n) if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): n = self.n n_folds = self.n_folds fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield self.idxs[start:stop] current = stop def __repr__(self): return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Parameters ---------- y : array-like, [n_samples] Samples to split in K folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = cross_validation.StratifiedKFold(y, n_folds=2) >>> len(skf) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in skf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size trunc(n_samples / n_folds), the last one has the complementary. """ def __init__(self, y, n_folds=3, indices=None, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__( len(y), n_folds, indices, shuffle, random_state) y = np.asarray(y) n_samples = y.shape[0] unique_labels, y_inversed = np.unique(y, return_inverse=True) label_counts = np.bincount(y_inversed) min_labels = np.min(label_counts) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # don't want to use the same seed in each label's shuffle if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each label so as to respect the # balance of labels per_label_cvs = [ KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle, random_state=rng) for c in label_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)): for label, (_, test_split) in zip(unique_labels, per_label_splits): label_test_folds = test_folds[y == label] # the test split can be too big because we used # KFold(max(c, self.n_folds), self.n_folds) instead of # KFold(c, self.n_folds) to make it possible to not crash even # if the data is not 100% stratifiable for all the labels # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(label_test_folds)] label_test_folds[test_split] = test_fold_idx test_folds[y == label] = label_test_folds self.test_folds = test_folds self.y = y def _iter_test_masks(self): for i in range(self.n_folds): yield self.test_folds == i def __repr__(self): return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.y, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LeaveOneLabelOut(_PartitionIterator): """Leave-One-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = cross_validation.LeaveOneLabelOut(labels) >>> len(lol) 2 >>> print(lol) sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2]) >>> for train_index, test_index in lol: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] """ def __init__(self, labels, indices=None): super(LeaveOneLabelOut, self).__init__(len(labels), indices) # We make a copy of labels to avoid side-effects during iteration self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) def _iter_test_masks(self): for i in self.unique_labels: yield self.labels == i def __repr__(self): return '%s.%s(labels=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, ) def __len__(self): return self.n_unique_labels class LeavePLabelOut(_PartitionIterator): """Leave-P-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. p : int Number of samples to leave out in the test split. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = cross_validation.LeavePLabelOut(labels, p=2) >>> len(lpl) 3 >>> print(lpl) sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2) >>> for train_index, test_index in lpl: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] """ def __init__(self, labels, p, indices=None): # We make a copy of labels to avoid side-effects during iteration super(LeavePLabelOut, self).__init__(len(labels), indices) self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) self.p = p def _iter_test_masks(self): comb = combinations(range(self.n_unique_labels), self.p) for idx in comb: test_index = self._empty_mask() idx = np.array(idx) for l in self.unique_labels[idx]: test_index[self.labels == l] = True yield test_index def __repr__(self): return '%s.%s(labels=%s, p=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, self.p, ) def __len__(self): return int(factorial(self.n_unique_labels) / factorial(self.n_unique_labels - self.p) / factorial(self.p)) class Bootstrap(object): """Random sampling with replacement cross-validation iterator Provides train/test indices to split data in train test sets while resampling the input n_iter times: each time a new random split of the data is performed and then samples are drawn (with replacement) on each side of the split to build the training and test sets. 
Note: contrary to other cross-validation strategies, bootstrapping will allow some samples to occur several times in each splits. However a sample that occurs in the train split will never occur in the test split and vice-versa. If you want each sample to occur at most once you should probably use ShuffleSplit cross validation instead. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default is 3) Number of bootstrapping iterations train_size : int or float (default is 0.5) If int, number of samples to include in the training split (should be smaller than the total number of samples passed in the dataset). If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. test_size : int or float or None (default is None) If int, number of samples to include in the training set (should be smaller than the total number of samples passed in the dataset). If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If None, n_test is set as the complement of n_train. random_state : int or RandomState Pseudo number generator state used for random sampling. Examples -------- >>> from sklearn import cross_validation >>> bs = cross_validation.Bootstrap(9, random_state=0) >>> len(bs) 3 >>> print(bs) Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0) >>> for train_index, test_index in bs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [1 8 7 7 8] TEST: [0 3 0 5] TRAIN: [5 4 2 4 2] TEST: [6 7 1 0] TRAIN: [4 7 0 1 1] TEST: [5 3 6 5] See also -------- ShuffleSplit: cross validation using random permutations. """ # Static marker to be able to introspect the CV type indices = True def __init__(self, n, n_iter=3, train_size=.5, test_size=None, random_state=None, n_bootstraps=None): # See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation # behind this deprecation warnings.warn("Bootstrap will no longer be supported as a " + "cross-validation method as of version 0.15 and " + "will be removed in 0.17", DeprecationWarning) self.n = n if n_bootstraps is not None: # pragma: no cover warnings.warn("n_bootstraps was renamed to n_iter and will " "be removed in 0.16.", DeprecationWarning) n_iter = n_bootstraps self.n_iter = n_iter if (isinstance(train_size, numbers.Real) and train_size >= 0.0 and train_size <= 1.0): self.train_size = int(ceil(train_size * n)) elif isinstance(train_size, numbers.Integral): self.train_size = train_size else: raise ValueError("Invalid value for train_size: %r" % train_size) if self.train_size > n: raise ValueError("train_size=%d should not be larger than n=%d" % (self.train_size, n)) if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0: self.test_size = int(ceil(test_size * n)) elif isinstance(test_size, numbers.Integral): self.test_size = test_size elif test_size is None: self.test_size = self.n - self.train_size else: raise ValueError("Invalid value for test_size: %r" % test_size) if self.test_size > n - self.train_size: raise ValueError(("test_size + train_size=%d, should not be " + "larger than n=%d") % (self.test_size + self.train_size, n)) self.random_state = random_state def __iter__(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_train = permutation[:self.train_size] ind_test = permutation[self.train_size:self.train_size + self.test_size] # bootstrap in each split individually train = 
rng.randint(0, self.train_size, size=(self.train_size,)) test = rng.randint(0, self.test_size, size=(self.test_size,)) yield ind_train[train], ind_test[test] def __repr__(self): return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, self.train_size, self.test_size, self.random_state, )) def __len__(self): return self.n_iter class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n, n_iter=10, test_size=0.1, train_size=None, indices=None, random_state=None, n_iterations=None): if indices is None: indices = True else: warnings.warn("The indices parameter is deprecated and will be " "removed (assumed True) in 0.17", DeprecationWarning) self.n = n self.n_iter = n_iter if n_iterations is not None: # pragma: no cover warnings.warn("n_iterations was renamed to n_iter for consistency " " and will be removed in 0.16.") self.n_iter = n_iterations self.test_size = test_size self.train_size = train_size self.random_state = random_state self._indices = indices self.n_train, self.n_test = _validate_shuffle_split(n, test_size, train_size) @property def indices(self): warnings.warn("The indices attribute is deprecated and will be " "removed (assumed True) in 0.17", DeprecationWarning, stacklevel=1) return self._indices def __iter__(self): if self._indices: for train, test in self._iter_indices(): yield train, test return for train, test in self._iter_indices(): train_m = np.zeros(self.n, dtype=bool) test_m = np.zeros(self.n, dtype=bool) train_m[train] = True test_m[test] = True yield train_m, test_m @abstractmethod def _iter_indices(self): """Generate (train, test) indices""" class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validation iterator. Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn import cross_validation >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... test_size=.25, random_state=0) >>> len(rs) 3 >>> print(rs) ... # doctest: +ELLIPSIS ShuffleSplit(4, n_iter=3, test_size=0.25, ...) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... train_size=0.5, test_size=.25, random_state=0) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... 
TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] See also -------- Bootstrap: cross-validation using re-sampling with replacement. """ def _iter_indices(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_test = permutation[:self.n_test] ind_train = permutation[self.n_test:self.n_test + self.n_train] yield ind_train, ind_test def __repr__(self): return ('%s(%d, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _validate_shuffle_split(n, test_size, train_size): if test_size is None and train_size is None: raise ValueError( 'test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind == 'i': if test_size >= n: raise ValueError( 'test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n)) else: raise ValueError("Invalid value for test_size: %r" % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError("train_size=%f should be smaller " "than 1.0 or be an integer" % train_size) elif np.asarray(test_size).dtype.kind == 'f' and \ train_size + test_size > 1.: raise ValueError('The sum of test_size and train_size = %f, ' 'should be smaller than 1.0. Reduce ' 'test_size and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind == 'i': if train_size >= n: raise ValueError("train_size=%d should be smaller " "than the number of samples %d" % (train_size, n)) else: raise ValueError("Invalid value for train_size: %r" % train_size) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n - n_test else: if np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n) else: n_train = float(train_size) if test_size is None: n_test = n - n_train if n_train + n_test > n: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n)) return int(n_train), int(n_test) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Parameters ---------- y : array, [n_samples] Labels of samples. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. 
train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.cross_validation import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0) >>> len(sss) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...) >>> for train_index, test_index in sss: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, y, n_iter=10, test_size=0.1, train_size=None, indices=None, random_state=None, n_iterations=None): super(StratifiedShuffleSplit, self).__init__( len(y), n_iter, test_size, train_size, indices, random_state, n_iterations) self.y = np.array(y) self.classes, self.y_indices = np.unique(y, return_inverse=True) n_cls = self.classes.shape[0] if np.min(np.bincount(self.y_indices)) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if self.n_train < n_cls: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_train, n_cls)) if self.n_test < n_cls: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_test, n_cls)) def _iter_indices(self): rng = check_random_state(self.random_state) cls_count = np.bincount(self.y_indices) p_i = cls_count / float(self.n) n_i = np.round(self.n_train * p_i).astype(int) t_i = np.minimum(cls_count - n_i, np.round(self.n_test * p_i).astype(int)) for n in range(self.n_iter): train = [] test = [] for i, cls in enumerate(self.classes): permutation = rng.permutation(cls_count[i]) cls_i = np.where((self.y == cls))[0][permutation] train.extend(cls_i[:n_i[i]]) test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]]) # Because of rounding issues (as n_train and n_test are not # dividers of the number of elements per class), we may end # up here with less samples in train and test than asked for. 
if len(train) < self.n_train or len(test) < self.n_test: # We complete by affecting randomly the missing indexes missing_idx = np.where(np.bincount(train + test, minlength=len(self.y)) == 0, )[0] missing_idx = rng.permutation(missing_idx) train.extend(missing_idx[:(self.n_train - len(train))]) test.extend(missing_idx[-(self.n_test - len(test)):]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.y, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter ############################################################################## def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = _check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv) return np.array(scores)[:, 0] class FitFailedWarning(RuntimeWarning): pass def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. 
y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. """ if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust lenght of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)" ) else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. 
" "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return y[ind] def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, a cv generator instance, or None The input specifying which cv generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. """ return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True) def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False): # This exists for internal use while indices is being deprecated. 
is_sparse = sp.issparse(X) needs_indices = is_sparse or not hasattr(X, "shape") if cv is None: cv = 3 if isinstance(cv, numbers.Integral): if warn_mask and not needs_indices: warnings.warn('check_cv will return indices instead of boolean ' 'masks from 0.17', DeprecationWarning) else: needs_indices = None if classifier: if type_of_target(y) in ['binary', 'multiclass']: cv = StratifiedKFold(y, cv, indices=needs_indices) else: cv = KFold(_num_samples(y), cv, indices=needs_indices) else: if not is_sparse: n_samples = len(X) else: n_samples = X.shape[0] cv = KFold(n_samples, cv, indices=needs_indices) if needs_indices and not getattr(cv, "_indices", True): raise ValueError("Sparse data and lists require indices-based cross" " validation generator, got: %r", cv) return cv def permutation_test_score(estimator, X, y, cv=None, n_permutations=100, n_jobs=1, labels=None, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. labels : array-like of shape [n_samples] (optional) Labels constrain the permutation among groups of samples with a same label. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape = [n_permutations] The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y = indexable(X, y) cv = _check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
score = _permutation_test_score(clone(estimator), X, y, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Parameters ---------- *arrays : sequence of arrays or scipy.sparse matrices with same shape[0] Python lists or tuples occurring in arrays are converted to 1D numpy arrays. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Returns ------- splitting : list of arrays, length=2 * len(arrays) List containing train-test split of input array. Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> a, b = np.arange(10).reshape((5, 2)), range(5) >>> a array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(b) [0, 1, 2, 3, 4] >>> a_train, a_test, b_train, b_test = train_test_split( ... a, b, test_size=0.33, random_state=42) ... >>> a_train array([[4, 5], [0, 1], [6, 7]]) >>> b_train [2, 0, 3] >>> a_test array([[2, 3], [8, 9]]) >>> b_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) dtype = options.pop('dtype', None) if dtype is not None: warnings.warn("dtype option is ignored and will be removed in 0.18.") force_arrays = options.pop('force_arrays', False) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if force_arrays: warnings.warn("The force_arrays option is deprecated and will be " "removed in 0.18.", DeprecationWarning) arrays = [check_array(x, 'csr', ensure_2d=False, force_all_finite=False) if x is not None else x for x in arrays] if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests
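# ---------------------------------------------------------------------------
# Editor's sketch (hedged, not part of the original module): minimal usage of
# the iterators and helpers defined above, using only signatures that appear
# in this file (KFold(n, n_folds), StratifiedKFold(y, n_folds),
# train_test_split, cross_val_score). Running it assumes a scikit-learn old
# enough to still ship `sklearn.cross_validation`; in later releases this API
# was deprecated in favour of `sklearn.model_selection`.
import numpy as np
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression

X = np.arange(20, dtype=float).reshape(10, 2)
y = np.array([0, 1] * 5)

# CV objects are iterators over (train_indices, test_indices) pairs
for train_idx, test_idx in cross_validation.KFold(len(X), n_folds=5):
    print("KFold train:", train_idx, "test:", test_idx)

# the stratified variant preserves the class balance within each fold
skf = cross_validation.StratifiedKFold(y, n_folds=2)

# convenience helpers built on top of the iterators
X_tr, X_te, y_tr, y_te = cross_validation.train_test_split(
    X, y, test_size=0.3, random_state=0)
scores = cross_validation.cross_val_score(LogisticRegression(), X, y, cv=skf)
print("mean CV accuracy:", scores.mean())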
bsd-3-clause
Garrett-R/scikit-learn
examples/linear_model/lasso_dense_vs_sparse_data.py
345
1862
""" ============================== Lasso on dense and sparse data ============================== We show that linear_model.Lasso provides the same results for dense and sparse data and that in the case of sparse data the speed is improved. """ print(__doc__) from time import time from scipy import sparse from scipy import linalg from sklearn.datasets.samples_generator import make_regression from sklearn.linear_model import Lasso ############################################################################### # The two Lasso implementations on Dense data print("--- Dense matrices") X, y = make_regression(n_samples=200, n_features=5000, random_state=0) X_sp = sparse.coo_matrix(X) alpha = 1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) t0 = time() sparse_lasso.fit(X_sp, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(X, y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_)) ############################################################################### # The two Lasso implementations on Sparse data print("--- Sparse matrices") Xs = X.copy() Xs[Xs < 2.5] = 0.0 Xs = sparse.coo_matrix(Xs) Xs = Xs.tocsc() print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)) alpha = 0.1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) t0 = time() sparse_lasso.fit(Xs, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(Xs.toarray(), y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
bsd-3-clause
tian-zhou/InstrumentSegmentation
main.py
1
33553
""" ------------------------------------------------------------------------------- Name: test module Purpose: test purpose Idea: how to solve it Author: Tian Zhou Email: zhou338 [at] purdue [dot] edu Created: 19/11/2015 Copyright: (c) Tian Zhou 2015 ------------------------------------------------------------------------------- """ import numpy as np import cv2 from matplotlib import pyplot as plt #from sklearn_Agglomerative import * from TianUtility import * from time import time from sklearn import mixture import sys sys.path.append('.\morphsnakes-master') import morphsnakes class Codebook: def __init__(self): pass """ Read in the color, depth and mask image from the training data set return the readin images """ def ReadImage(self, fileIndex = 1, show = False): # read in image colorFileName = 'trainImage\\color_'+str(fileIndex)+'.jpg' depthFileName = 'trainImage\\depth_'+str(fileIndex)+'.jpg' maskFileName = 'trainImage\\mask_'+str(fileIndex)+'.jpg' imgBGR = cv2.imread(colorFileName, 1) imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) imgDepth = cv2.imread(depthFileName, 0) imgMaskGray = cv2.imread(maskFileName, 0) # erod the mask so that the boundary won't bother us imgMaskGray = cv2.erode(imgMaskGray, np.ones((5,5),np.uint8)) _, imgMask = cv2.threshold(imgMaskGray,127,255,cv2.THRESH_BINARY) # rescale depth and mask imgDepth, imgMask = self.RescaleDepth(imgBGR, imgDepth, imgMask) # histogram equalization on the gray image imgGrayHistEqu = self.IlluminationNorm(imgGray) if show == True: cv2.imshow("imgBGR", imgBGR) cv2.imshow("imgGray", imgGray) cv2.imshow('imgDepth', imgDepth) cv2.imshow('imgMask', imgMask) WaitKey(1) return imgBGR, imgGray, imgGrayHistEqu, imgDepth, imgMask """ rescale the depth and mask image to have the same size as color image here the height ratio is actually different with width ratio, with a small difference which we will ignore. """ def RescaleDepth(self, imgBGR, imgDepth, imgMask): h,w,_ = imgBGR.shape imgDepth = cv2.resize(imgDepth,(w, h), interpolation = cv2.INTER_CUBIC) imgMask = cv2.resize(imgMask,(w, h), interpolation = cv2.INTER_CUBIC) return imgDepth, imgMask """ apply the mask to the input image """ def ApplyMask(self, img, imgMask): return cv2.bitwise_and(img, img, mask = imgMask) """ Normalize the image so that it is illumination invariant Method 1: histogram equalization Method 2: normalize the grayscale value directly (does not work) """ def IlluminationNorm(self, imgGray, show = False): # method 1 imgGrayHistEqu = cv2.equalizeHist(imgGray) # method 2 # imgGrayNorm = imgGray.copy() # cv2.normalize(imgGrayNorm, imgGrayNorm, 0, 255, cv2.NORM_MINMAX) # the same # cv2.normalize(imgGrayNorm.astype(np.float), imgGrayNorm, 1) # cv2.normalize(imgGrayNorm, imgGrayNorm, 0, 255, cv2.NORM_MINMAX) # the same # display if show == True: cv2.imshow("imgGray", imgGray) cv2.imshow("imgGrayHistEqu", imgGrayHistEqu) #cv2.imshow("imgGrayNorm", imgGrayNorm) WaitKey(1) return imgGrayHistEqu """ Apply non-maximum suppresion to avoid duplicate positive responses in a near window. If the positive response is not the largest in a neighboring window (WxW), it will be assigned 0. We only keep the largest. THIS FUNCTION RUNS SLOW! THREE LOOPS! 
""" def NonMaximumSuppresion(self, imgMask, imgResponse, large = True, winsize = 15): startTime = time() imgMaskCp = imgMask.copy() M,N = imgMaskCp.shape halfwinsize = int(winsize/2) for i in range(halfwinsize, M-halfwinsize): for j in range(halfwinsize, N-halfwinsize): if imgMaskCp[i,j] == 255: if large == True: if imgResponse[i,j] != \ np.amax(imgResponse[i-halfwinsize:i+halfwinsize+1,\ j-halfwinsize:j+halfwinsize+1]): imgMaskCp[i,j] = 0 elif large == False: if imgResponse[i,j] != \ np.amin(imgResponse[i-halfwinsize:i+halfwinsize+1,\ j-halfwinsize:j+halfwinsize+1]): imgMaskCp[i,j] = 0 elapsedTime = time() - startTime print "NonMaximumSuppresion function takes %f seconds to finish" % elapsedTime return imgMaskCp """ Get the Harris Corner responce matrix, also threshold it with min or max @ large: if True gets the large responce (corners) if False gets the small response (plain area) @ tau: the threshold = tau * response.max(), 1e-6 for small and 1e-2 for large if edge, response -> -inf if cornder, response -> +inf if plain, response -> 0 we want both edges and corners, so we threshold on abs(response) """ def HarrisCorner(self, imgBGR, imgGray, imgMask, large = True, tau = 1e-2, show = True, NMS = True): imgGray = np.float32(imgGray) response = cv2.cornerHarris(imgGray,blockSize = 2, ksize = 3, k = 0.04) responseAbs = abs(response) # threshold the response if large == True: _,cornerMask = cv2.threshold(responseAbs,tau*responseAbs.max(),255,cv2.THRESH_BINARY) # apply mask to select only foreground cornerMask = self.ApplyMask(cornerMask, imgMask) elif large == False: _,cornerMask = cv2.threshold(responseAbs,tau*responseAbs.max(),255,cv2.THRESH_BINARY_INV) cornerMask = self.ApplyMask(cornerMask, imgMask) # median blue cornerMask = cornerMask.astype(np.uint8) if large == True: cornerMask = cv2.medianBlur(cornerMask, 3) else: cornerMask = cv2.medianBlur(cornerMask, 7) # non-maximum suppresion if NMS == True: cornerMaskNMS = self.NonMaximumSuppresion(cornerMask, responseAbs, large) else: cornerMaskNMS = cornerMask.copy() cornerMaskNMSDilate = cv2.dilate(cornerMaskNMS, None) # rescale responseAbs for better visualization, it is a float image cv2.normalize(responseAbs, responseAbs, 0, 1, cv2.NORM_MINMAX) if show == True: imgBGRShow = imgBGR.copy() imgBGRShow[cornerMaskNMSDilate == 255]=[0,0,255] cv2.imshow('responseAbs', responseAbs) cv2.imshow('imgBGR with corner', imgBGRShow) cv2.imshow('cornerMask', cornerMask) cv2.imshow('cornerMaskNMSDilate', cornerMaskNMSDilate) WaitKey(0) return cornerMaskNMS """ display the two masks for foreground and background together on the image """ def ShowTwoMasksTogether(self, imgBGR, fgMask, bgMask): imgBGRShow = imgBGR.copy() imgBGRShow[fgMask == 255]=[255,255,0] imgBGRShow[bgMask == 255]=[0,0,255] cv2.imshow('imgBGR with corner', imgBGRShow) WaitKey(0) """ Extract patches based on masks. For each non-zero element in the mask, we extract the WxW patch surrounding it """ def ExtractPatches(self, imgBGR, cornerMask, W = 15): M,N,_ = imgBGR.shape halfW = int(W/2) colorPatches = [] for i in range(halfW, M-halfW): for j in range(halfW, N-halfW): if cornerMask[i,j] == 255: colorPatch = imgBGR[i-halfW: i+halfW+1, j-halfW: j+halfW+1] colorPatches.append(colorPatch) return colorPatches """ resize the color patches and visualize them """ def VisPatches(self, colorPatches): N = len(colorPatches) print "Display %d patches." 
% N Nsqrt = int(np.sqrt(N)) enlargeFactor = 4 bigN = 15 * enlargeFactor bigImage = np.zeros((bigN*Nsqrt, bigN*Nsqrt, 3), np.uint8) for i in range(Nsqrt**2): bigImage[(i % Nsqrt) * bigN: (i % Nsqrt) * bigN + bigN, (i/Nsqrt) * bigN: (i/Nsqrt) * bigN+bigN, :] \ = cv2.resize(colorPatches[i], None, fx=enlargeFactor, fy=enlargeFactor,interpolation = cv2.INTER_CUBIC) #cv2.imshow('Patch'+str(i+1), cv2.resize(colorPatches[i], None, fx=2, fy=2,interpolation = cv2.INTER_CUBIC)) #cv2.moveWindow('Patch'+str(i+1), (i % Nsqrt) * 30, (i/Nsqrt) * 30) #WaitKey(10) cv2.imshow('Extracted patches', bigImage) WaitKey(0) """ calculate the NGC between two image patches it should be a value in [-1,1] to evaluate the similarity between two images NGC = +1 : same image -1 : entirely different image 0 : no relation """ def NGC(self, img1, img2): img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY).astype(np.float) img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY).astype(np.float) M, N = img1.shape M2, N2 = img2.shape assert (M == M2 and N == N2) numerator = np.sum(np.multiply(img1 - np.mean(img1), img2-np.mean(img2))) denominator = M * np.std(img1) * M * np.std(img2) ngc = numerator/denominator ngc = min(ngc, 1) assert (abs(ngc) <= 1) return ngc """ Given a color patches list, generate the similarity matrix within the list The similarity measure is NGC between two images RUNS SLOW!!! NGC takes time and this is O(N^2) """ def GetSimilarityMatrix(self, colorPatches): start_time = time() N = len(colorPatches) simi = np.zeros((N,N), np.float) for i in range(N): for j in range(i+1, N): simi[i,j] = self.NGC(colorPatches[i], colorPatches[j]) elapsed_time = time() - start_time print "GetSimilarityMatrix function takes %f seconds to finish" % elapsed_time return simi """ codebook generation from one image """ def CodebookGenerationOneImage(self, imgBGR, imgGrayHistEqu, imgMask, _large, N_clusters = 10): if _large == True: Mask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = True, tau = 1e-2) else: Mask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = False, tau = 1e-6) ColorPatches = self.ExtractPatches(imgBGR, Mask) self.VisPatches(ColorPatches) Simi = self.GetSimilarityMatrix(ColorPatches) Dist = 1-Simi Clusters, SumDist = AggloCluster(Dist, _n_clusters = N_clusters) for i in range(len(Clusters)): Clusters[i].assignWeight(len(ColorPatches)) Clusters[i].plot(ColorPatches) return ColorPatches, len(ColorPatches), Clusters """ codebook generation from the input color features the colorpatches are used to plot the elements in the clustering better """ def CodebookGeneration(self, ColorFeatures, ColorPatches, _N_comp = 10, show = False): obs = ColorFeatures GMM = mixture.GMM(n_components=_N_comp) GMM.fit(obs) Labels = GMM.predict(obs) Clusters = [] for i in range(_N_comp): c = Cluster(i, np.nonzero(Labels == i)[0], ColorPatches) c.assignWeight(GMM.weights_[i]) if show == True: c.plot(ColorPatches) Clusters.append(c) return Clusters, GMM """ evaluate the training fg and bg GMMs """ def EvaluateGMM(self, fgGMM, bgGMM, fgColorFeatures, bgColorFeatures): # Evaluate GMM result o = fgColorFeatures fgLikelihood = fgGMM.score(o) bgLikelihood = bgGMM.score(o) pred = fgLikelihood > bgLikelihood print "fg accuracy: ", np.round(np.sum(pred)/float(len(fgLikelihood)), 3) o = bgColorFeatures fgLikelihood = fgGMM.score(o) bgLikelihood = bgGMM.score(o) pred = fgLikelihood < bgLikelihood print "bg accuracy: ", np.round(np.sum(pred)/float(len(bgLikelihood)), 3) """ Shift the NGCs from range [-1,1] to [0,2] so that they are all 
positive. Then normalize all the NGCs into a probability distribution """ def NormalizeNGCs(self, NGCs): NGCs += 1 NGCs /= np.sum(NGCs) return NGCs """ for each observation, estimate P(o|H) P(o|H) = sum_j {P(o|I_j, H) * P(I_j| H)} """ def CalcLikelihood(self, o, ColorClusters): NGCs = np.zeros(len(ColorClusters)) for j in range(len(ColorClusters)): NGCs[j] = self.NGC(o, ColorClusters[j].avgImg) """ 1, uniform patch, bad """ ## sumP = np.sum(NGCs + 1) """ 3, full average, worst """ ## sumP = np.sum([NGCs[i] * ColorClusters[i].weight for i in range(len(ColorClusters))]) """ 4, max pooling, good """ # maxP = np.amax([NGCs[i] * ColorClusters[i].weight for i in range(len(ColorClusters))]) """ 5, max pooling, best """ maxP = np.amax(NGCs) return maxP """ Read the saved color patches, for background or foreground """ def ReadColorPatches(self, fg = True, _N = 100): ColorPatches = [] if fg == True: N = min(_N, 535) # in total we have 535 fg images for i in range(N): ColorPatches.append(cv2.imread(".\\trainPatches\\foreground\\fg_"+str(i) + ".jpg", 1)) elif fg == False: N = min(_N, 1131) # in total we have 1131 bg images for i in range(N): ColorPatches.append(cv2.imread(".\\trainPatches\\background\\bg_"+str(i) + ".jpg", 1)) return ColorPatches """ create features for the color patches """ def ColorFeature(self, ColorPatches, method = 'gray'): N = len(ColorPatches) W, _, _ = ColorPatches[0].shape if method == 'gray': # stack the gray values features = np.zeros((N, W**2)) for i in range(N): features[i,:] = np.ravel(cv2.cvtColor(ColorPatches[i], cv2.COLOR_BGR2GRAY)) if method == 'hist': # histogram for the grayscale images Nbins = 16 features = np.zeros((N, Nbins)) for i in range(N): features[i,:] = np.ravel(cv2.calcHist([cv2.cvtColor(ColorPatches[i], cv2.COLOR_BGR2GRAY)], [0], None, [Nbins], [0, 256])) if method == 'hog': # hog for the 15x15 patch pass if method == 'sift': pass if method == 'surf': pass return features """ Detect and draw SIFT features on a given image """ def SIFT(self, imgBGR, show = True): imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) sift = cv2.SIFT() kp, des = sift.detectAndCompute(imgGray,None) # the SIFT descriptor is 128 dimension imgBGRcp = imgBGR.copy() imgBGRcp = cv2.drawKeypoints(imgBGRcp,kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.imshow('img_SIFT', imgBGRcp) WaitKey(0) """ Detect and draw SURF features on a given image It seems that SURF is easier to play with since it is easier to set parameters """ def SURF(self, imgBGR, show = True): imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) surf = cv2.SURF(4000) # small number -> more salient points kp, des = surf.detectAndCompute(imgGray,None) # the SIFT descriptor is 128 dimension imgBGRcp = imgBGR.copy() imgBGRcp = cv2.drawKeypoints(imgBGRcp,kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.imshow('img_SURF', imgBGRcp) WaitKey(0) """ check the SIFT/SURF interesting points for the input image """ def SIFT_SURF_test(self, imgBGR): self.SIFT(imgBGR) self.SURF(imgBGR) """ experiment 1: test the opencv sift and surf interesting point extractor visualize them to see how good they can capture the foreground and background """ def exp1(self, imgBGR): self.SIFT_SURF_test(imgBGR) """ experiment 2: show masks for both foreground and background. 
need to disable the non-maximum suppression in HarrisCorner function for better visualization """ def exp2(self, imgBGR, imgGrayHistEqu, imgMask): fgMask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = True, tau = 1e-2, NMS = False) bgMask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = False, tau = 1e-6, NMS = False) self.ShowTwoMasksTogether(imgBGR, fgMask, bgMask) """ experiment 3: test differnt cluster size for the aggomerative clustering """ def exp3(self, imgBGR, imgGrayHistEqu, imgMask, _large): if _large == True: Mask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = True, tau = 1e-2) else: Mask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = False, tau = 1e-6) ColorPatches = self.ExtractPatches(imgBGR, Mask) self.VisPatches(ColorPatches) Simi = self.GetSimilarityMatrix(ColorPatches) Dist = 1-Simi numClusRange = range(5, 100, 5) SumDist = np.zeros((len(numClusRange))) times = np.zeros((len(numClusRange))) for i in range(len(numClusRange)): startTime = time() Clusters, SumDist[i] = AggloCluster(Dist, numClusRange[i]) elapsedTime = time() - startTime times[i] = elapsedTime # print "number of cluters: %i, sum Distance: %f" % (numClusRange[i], SumDist[i]) plt.figure(1) plt.plot(numClusRange, SumDist) plt.title("experiemnt with number of clusters in the agglomerative clustering") plt.xlabel("number of clusters") plt.ylabel("sum of distances within clusters (large -> bad cluster)") plt.figure(2) plt.plot(numClusRange, times) plt.title("experiemnt with number of clusters in the agglomerative clustering") plt.xlabel("number of clusters") plt.ylabel("elapsed time for convergence (in seconds)") plt.show() """ experiment 4: agglomerative clustering + NGC """ def exp4(self): for fileIndex in range(1,21): imgBGR, imgGray, imgGrayHistEqu, imgDepth, imgMask = self.ReadImage(fileIndex) # codebook generation fgColorPatches, fgColorPatchesN, fgColorClusters = self.CodebookGenerationOneImage(imgBGR, imgGrayHistEqu, imgMask, _large = True, N_clusters = 40) # 40 is better than 10 bgColorPatches, bgColorPatchesN, bgColorClusters = self.CodebookGenerationOneImage(imgBGR, imgGrayHistEqu, imgMask, _large = False, N_clusters = 40) # for each observation, estimate P(o|I_j, H_1) pred = np.zeros(fgColorPatchesN) for i in range(fgColorPatchesN): o = fgColorPatches[i] fgLikelihood = self.CalcLikelihood(o, fgColorClusters) bgLikelihood = self.CalcLikelihood(o, bgColorClusters) pred[i] = fgLikelihood > bgLikelihood #print "fgLikelihood: %.2f, bgLikelihood %.2f" % (fgLikelihood, bgLikelihood) print "accuracy: ",np.sum(pred)/float(fgColorPatchesN) pred = np.zeros(bgColorPatchesN) for i in range(bgColorPatchesN): o = bgColorPatches[i] fgLikelihood = self.CalcLikelihood(o, fgColorClusters) bgLikelihood = self.CalcLikelihood(o, bgColorClusters) pred[i] = fgLikelihood < bgLikelihood #print "fgLikelihood: %.2f, bgLikelihood %.2f" % (fgLikelihood, bgLikelihood) print "accuracy: ",np.sum(pred)/float(bgColorPatchesN) # end the program """ save the local color patches for both fg and bg """ def SaveLocalColorPatches(self): for fileIndex in range(1,21): imgBGR, imgGray, imgGrayHistEqu, imgDepth, imgMask = self.ReadImage(fileIndex) global GLOBAL_numbg, GLOBAL_numfg fgMask = self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = True, tau = 1e-2) fgColorPatches = self.ExtractPatches(imgBGR, fgMask) for i in range(len(fgColorPatches)): cv2.imwrite(".\\trainPatches\\foreground\\fg_"+str(i+GLOBAL_numfg) + ".jpg", fgColorPatches[i]) GLOBAL_numfg = len(fgColorPatches) bgMask = 
self.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = False, tau = 1e-6) bgColorPatches = self.ExtractPatches(imgBGR, bgMask) for i in range(len(bgColorPatches)): cv2.imwrite(".\\trainPatches\\background\\bg_"+str(i+GLOBAL_numbg) + ".jpg", bgColorPatches[i]) GLOBAL_numbg = len(bgColorPatches) """ Given an input image and a histogram, do the histogram backprojection return a mask """ def HistBackProj(self, imgBGR, target_hist, show = True): imgHSV = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV) cv2.normalize(target_hist,target_hist,0,255,cv2.NORM_MINMAX) prob = cv2.calcBackProject([imgHSV],[0,1], target_hist,[0,180,0,256],1) if show: cv2.imshow('prob', prob) # Now convolute with circular disc disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) cv2.filter2D(prob,-1,disc,prob) if show: cv2.imshow('prob2', prob) # threshold and binary AND ret, mask = cv2.threshold(prob,128,255,0) mask = cv2.merge((mask,mask,mask)) ROIimgBGR = cv2.bitwise_and(imgBGR,mask) if show: cv2.imshow('mask', mask) cv2.imshow('ROIimgBGR', ROIimgBGR) WaitKey(1) return prob, mask, ROIimgBGR """ use contour manipulation to get rid of the small components in the foreground """ def ContourClean(self, mask): _, BW = cv2.threshold(mask,127,255,0) contours, hierarchy = cv2.findContours(BW,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key = cv2.contourArea, reverse = True)[:10] cntImg = np.zeros(mask.shape, np.uint8) for i in range(len(contours)): cnt = contours[i] M = cv2.moments(cnt) area = cv2.contourArea(cnt) if i == 0: areaT = 0.05 * area if area > areaT: #print "\nContour index %i" % i #print "This area: %f" % area cv2.drawContours(cntImg, contours, i, 255, -1) cv2.imshow('cntImg', cntImg) WaitKey(1) return cntImg """ smoothing: large, the boundary is smoother small, the boundary is coarse good: 5 threshold: small, keep shrinking and will vanish large, does not move at all good: 0.3 balloon: <0, contour become smaller >0, contour becomes larger good: -1 """ def snake(self, imgBGR, imgMask): # Load the image. #imgcolor = imread("../training/color_1.jpg")/255.0 imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) imgBGR = (imgBGR/255.0).astype(np.float) imgGray =(imgGray/255.0).astype(np.float) # g(I) gI = morphsnakes.gborders(imgGray, alpha=1000, sigma=2) # Morphological GAC. Initialization of the level-set. mgac = morphsnakes.MorphGAC(gI, smoothing=10, threshold=0.3, balloon=-1) _, imgMask = cv2.threshold(imgMask,127,255,cv2.THRESH_BINARY) mgac.levelset = imgMask # plot cv2.imshow('imgBGR', imgBGR) cv2.imshow('imgGray', imgGray) cv2.imshow('gI', gI) cv2.imshow('mgac.levelset', mgac.levelset) cv2.waitKey(0) # Visual evolution. plt.figure() b,g,r = cv2.split(imgBGR) imgRGB = cv2.merge([r,g,b]) morphsnakes.evolve_visual(mgac, num_iters=10, background=imgRGB) return mgac.levelset GLOBAL_numfg = 0 GLOBAL_numbg = 0 class Cluster: def __init__(self, id, components, colorPatches): self.components = components self.count = len(self.components) self.clusterID = id self.weight = 0 self.avgImg = None self.bigImage= None self.hist = None self.enlargeFactor = 6 self.summarize(colorPatches) def summarize(self, colorPatches): # get average image accumu = np.zeros(colorPatches[0].shape, np.float) for i in range(self.count): cv2.accumulate(colorPatches[self.components[i]], accumu) self.avgImg = (accumu/self.count).astype(np.uint8) # get hist W,H,_ = colorPatches[0].shape self.bigImage = np.zeros((W, H*self.count, 3), np.uint8) for i in range(self.count): self. 
bigImage[:, i*H : (i+1)*H, :] = colorPatches[self.components[i]] bigImageHSV = cv2.cvtColor(self.bigImage, cv2.COLOR_BGR2HSV) self.hist = cv2.calcHist([bigImageHSV], [0,1], None, [180, 256],[0, 180,0,256]) # self.plot(colorPatches) # get pca image # it seems that the PCA is not as good as the average image to visualize the cluster # self.CalcPCA(colorPatches) def CalcPCA(self, colorPatches, K = 1, show = True): width, height, _ = colorPatches[0].shape N = len(self.components) X = np.zeros((width*height*3 , N)) for i in range(N): #x_vec = np.ravel(colorPatches[i][:,:,0]).astype(float) x_vec = np.ravel(colorPatches[self.components[i]]).astype(float) #x_vec = np.ravel(cv2.cvtColor(colorPatches[self.components[i]], cv2.COLOR_BGR2GRAY)).astype(float) x_vec /= np.linalg.norm(x_vec) X[:,i] = x_vec GrandMean = np.mean(X,1) for i in range(N): X[:,i] -= GrandMean U,s,Vt = np.linalg.svd(np.dot(np.transpose(X), X)) Wp = np.zeros((width*height*3, K)) for i in range(K): wi = np.dot(X,U[:,i]) wi /= np.linalg.norm(wi) Wp[:,i] = wi # Visualize the most significant Eigen-Patterns if show == True: for k in range(K): w = np.reshape(Wp[:,k]+GrandMean, (width,height,3)).astype(np.float) cv2.normalize(w, w, 0, 255, cv2.NORM_MINMAX) # the same w = w.astype(np.uint8) #wint_color = cv2.applyColorMap(wint, cv2.COLORMAP_JET) cv2.imshow('w'+str(k),cv2.resize(w, None, fx=self.enlargeFactor, fy=self.enlargeFactor, interpolation = cv2.INTER_CUBIC)) #cv2.imshow('wint_color'+str(k),cv2.resize(wint_color, None, fx=self.enlargeFactor, fy=self.enlargeFactor, interpolation = cv2.INTER_CUBIC)) WaitKey(0) def __repr__(self): return '(clusterID %i, count %i, weight %f)' % (self.clusterID, self.count,self.weight) def assignWeight(self, weight): self.weight = weight def plot(self, colorPatches): print self cv2.imshow('Cluster'+str(self.clusterID), cv2.resize(self.bigImage, None, fx=self.enlargeFactor, fy=self.enlargeFactor,interpolation = cv2.INTER_CUBIC)) cv2.imshow('Rescaled Average image'+str(self.clusterID), cv2.resize(self.avgImg, None, fx=self.enlargeFactor, fy=self.enlargeFactor,interpolation = cv2.INTER_CUBIC)) WaitKey(1) def main(): cb = Codebook() """ GMM to fit the color features for foreground and background """ N_comp = 10 # surprisingly, 10 is the best... fgColorPatches = cb.ReadColorPatches(fg = True, _N = 2000) fgColorFeatures = cb.ColorFeature(fgColorPatches, method = 'gray') fgClusters, fgGMM = cb.CodebookGeneration(fgColorFeatures, fgColorPatches, _N_comp = N_comp) bgColorPatches = cb.ReadColorPatches(fg = False, _N = 2000) bgColorFeatures = cb.ColorFeature(bgColorPatches, method = 'gray') bgClusters, bgGMM = cb.CodebookGeneration(bgColorFeatures, bgColorPatches, _N_comp = N_comp) """ Evaluate GMM result on the same training data TO-DO, should split the data into training and validation """ cb.EvaluateGMM(fgGMM, bgGMM, fgColorFeatures, bgColorFeatures) """ segmentation for each image using the training GMMs """ for fileIndex in range(1,21): imgBGR, imgGray, imgGrayHistEqu, imgDepth, imgMask = cb.ReadImage(fileIndex) """ !!! 
""" enableWeight = True """ histogram back projection for foreground """ fgProbs = np.zeros((imgBGR.shape[0:2]), np.float) for i in range(N_comp): prob, mask, ROIimgBGR = cb.HistBackProj(imgBGR, fgClusters[i].hist, show=False) if enableWeight == False: fgProbs += prob else: fgProbs += prob.astype(np.float) * fgClusters[i].weight fgProbs = np.divide(fgProbs, N_comp).astype(np.uint8) #cv2.imshow('fgProbs', fgProbs) #ret, fgMask = cv2.threshold(fgProbs,180,255,0) ret, fgMask = cv2.threshold(fgProbs,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) fgMask = cb.ApplyMask(fgMask, imgMask) fgMask = cv2.medianBlur(fgMask, 5) fgMask3Channel = cv2.merge((fgMask,fgMask,fgMask)) fgimgBGR = cv2.bitwise_and(imgBGR,fgMask3Channel) #cv2.imshow('fgMask', fgMask) cv2.imshow('fgimgBGR', fgimgBGR) """ histogram back projection for background """ bgProbs = np.zeros((imgBGR.shape[0:2]), np.float) for i in range(N_comp): prob, mask, ROIimgBGR = cb.HistBackProj(imgBGR, bgClusters[i].hist, show=False) if enableWeight == False: bgProbs += prob else: bgProbs += prob.astype(np.float) * bgClusters[i].weight bgProbs = np.divide(bgProbs, N_comp).astype(np.uint8) #cv2.imshow('bgProbs', bgProbs) #ret, bgMask = cv2.threshold(bgProbs,100,255,0) ret, bgMask = cv2.threshold(bgProbs,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) bgMask = cb.ApplyMask(bgMask, imgMask) bgMask = cv2.medianBlur(bgMask, 5) bgMask3Channel = cv2.merge((bgMask,bgMask,bgMask)) bgimgBGR = cv2.bitwise_and(imgBGR,bgMask3Channel) #cv2.imshow('bgMask', bgMask) cv2.imshow('bgimgBGR', bgimgBGR) """ ratio of fg prob over bg prob """ bgProbs = (bgProbs + 0.1).astype(np.float) ratio = np.divide(fgProbs.astype(np.float), bgProbs) rho = 3 combMask = (255 * (ratio > rho)).astype(np.uint8) combMask = cb.ApplyMask(combMask, imgMask) combMask = cv2.medianBlur(combMask, 9) cv2.imshow('combMask', combMask) combMask3Channel = cv2.merge((combMask,combMask,combMask)) combimgBGR = cv2.bitwise_and(imgBGR,combMask3Channel) cv2.imshow('combimgBGR', combimgBGR) """ dilate the mask and then use snake algorithm """ kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)) combMaskBig = cv2.dilate(combMask, kernel, iterations = 3) cv2.imshow('combMaskBig', combMaskBig) combMaskBig3Channel = cv2.merge((combMaskBig,combMaskBig,combMaskBig)) combimgBGRBig = cv2.bitwise_and(imgBGR, combMaskBig3Channel) cv2.imshow("combimgBGRBig", combimgBGRBig) """ use contour manipulation to clean the fg mask """ cleanMask = cb.ContourClean(combMaskBig) cv2.imwrite('.\\resultMask\\mask_'+str(fileIndex)+'.jpg', cleanMask) WaitKey(0) """ snake!!! """ snakeMask = cb.snake(imgBGR, cleanMask) continue """ use the same Harris corner detector to find the salient points for both fg and bg, then use the GMM model to find out if the patches belong to foreground or background This is going to work very well. I think. But this is over-fitting to our data. It is not right. 
""" fgMask = cb.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = True, tau = 1e-2) fgColorPatches = cb.ExtractPatches(imgBGR, fgMask) fgColorFeatures = cb.ColorFeature(fgColorPatches, method = 'gray') o = fgColorFeatures fgLikelihood = fgGMM.score(o) bgLikelihood = bgGMM.score(o) pred = fgLikelihood > bgLikelihood fgColorModel = [fgColorPatches[i] for i in range(len(fgColorPatches)) if pred[i] == 1] bgMask = cb.HarrisCorner(imgBGR, imgGrayHistEqu, imgMask, large = False, tau = 1e-6) bgColorPatches = cb.ExtractPatches(imgBGR, bgMask) bgColorFeatures = cb.ColorFeature(bgColorPatches, method = 'gray') o = bgColorFeatures fgLikelihood = fgGMM.score(o) bgLikelihood = bgGMM.score(o) pred = fgLikelihood < bgLikelihood bgColorModel = [bgColorPatches[i] for i in range(len(bgColorPatches)) if pred[i] == 1] print 1; cv2.destroyAllWindows() if __name__ == '__main__': main()
gpl-2.0
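The script in the row above compares 15x15 image patches with a normalized grayscale correlation (NGC) before clustering them into foreground/background codebooks. Below is a minimal, self-contained sketch of that similarity measure in plain NumPy; the function name, the grayscale-input assumption, and the standard M*N normalization are choices made here for illustration rather than lifted from the file.

import numpy as np

def ngc_similarity(patch1, patch2):
    # Normalized cross-correlation of two equally sized grayscale patches.
    # Returns a value in [-1, 1]: 1 for identical patches, about 0 for unrelated ones.
    a = patch1.astype(float)
    b = patch2.astype(float)
    if a.shape != b.shape:
        raise ValueError("patches must have the same shape")
    numerator = np.sum((a - a.mean()) * (b - b.mean()))
    denominator = a.size * a.std() * b.std()
    if denominator == 0.0:
        # A flat patch has zero variance, so the correlation is undefined; treat as unrelated.
        return 0.0
    return float(np.clip(numerator / denominator, -1.0, 1.0))

# Usage sketch: a patch correlates perfectly with itself.
# p = np.random.randint(0, 256, (15, 15)).astype(np.uint8)
# print(ngc_similarity(p, p))   # -> 1.0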
KDB2/OpenReliability
veusz/plugins/importplugin.py
3
25778
# Copyright (C) 2010 Jeremy S. Sanders # Email: Jeremy Sanders <jeremy@jeremysanders.net> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################## """Import plugin base class and helpers.""" from __future__ import division import os.path import numpy as N from ..compat import crange, cstr, cstrerror from .. import utils from .. import qtall as qt4 from . import field from . import datasetplugin def _(text, disambiguation=None, context='ImportPlugin'): """Translate text.""" return qt4.QCoreApplication.translate(context, text, disambiguation) # add an instance of your class to this list to get it registered importpluginregistry = [] class ImportPluginParams(object): """Parameters to plugin are passed in this object.""" def __init__(self, filename, encoding, field_results): self.filename = filename self.encoding = encoding self.field_results = field_results def openFileWithEncoding(self): """Helper to open filename but respecting encoding.""" return utils.openEncoding(self.filename, self.encoding) class ImportPluginException(RuntimeError): """An exception to return errors about importing or previewing data.""" class ImportPlugin(object): """Define a plugin to read data in a particular format. Override doImport and optionally getPreview to define a new plugin. Register the class by adding it to the importpluginregistry list. Of promote_tab is set to some text, put the plugin on its own tab in the import dialog using that text as the tab name. """ name = 'Import plugin' author = '' description = '' # if set to some text, use this plugin on its own tab promote_tab = None # set these to get focus if a file is selected with these extensions # include the dot in the extension names file_extensions = set() def __init__(self): """Override this to declare a list of input fields if required.""" # a list of Field objects to display self.fields = [] def getPreview(self, params): """Get data to show in a text box to show a preview. params is a ImportPluginParams object. Returns (text, okaytoimport) """ f = params.openFileWithEncoding() return f.read(4096), True def doImport(self, params): """Actually import data params is a ImportPluginParams object. 
Return a list of datasetplugin.Dataset1D, datasetplugin.Dataset2D objects """ return [] ################################################################# class ImportPluginExample(ImportPlugin): """An example plugin for reading a set of unformatted numbers from a file.""" name = "Example plugin" author = "Jeremy Sanders" description = _("Reads a list of numbers in a text file") def __init__(self): self.fields = [ field.FieldText("name", descr=_("Dataset name"), default="name"), field.FieldBool("invert", descr=_("invert values")), field.FieldFloat("mult", descr=_("Multiplication factor"), default=1), field.FieldInt("skip", descr=_("Skip N lines"), default=0, minval=0), field.FieldCombo("subtract", items=("0", "1", "2"), editable=False, default="0") ] def doImport(self, params): """Actually import data params is a ImportPluginParams object. Return a list of datasetplugin.Dataset1D, datasetplugin.Dataset2D objects """ try: f = params.openFileWithEncoding() data = [] mult = params.field_results["mult"] sub = float(params.field_results["subtract"]) if params.field_results["invert"]: mult *= -1 for i in crange(params.field_results["skip"]): f.readline() for line in f: data += [float(x)*mult-sub for x in line.split()] return [datasetplugin.Dataset1D(params.field_results["name"], data), datasetplugin.Constant("testconst", "42"), datasetplugin.Function("testfunc(x)", "testconst*x**2")] except Exception as e: raise ImportPluginException(cstr(e)) class ImportPluginDateTime(ImportPlugin): """An example plugin for reading a set of iso date-times from a file.""" name = "Example plugin for date/times" author = "Jeremy Sanders" description = _("Reads a list of ISO date times in a text file") def __init__(self): self.fields = [ field.FieldText("name", descr=_("Dataset name"), default="name"), ] def doImport(self, params): """Actually import data params is a ImportPluginParams object. Return a list of datasetplugin.Dataset1D, datasetplugin.Dataset2D objects """ f = params.openFileWithEncoding() data = [] for line in f: data.append( datasetplugin.DatasetDateTime. 
dateStringToFloat(line.strip()) ) return [ datasetplugin.DatasetDateTime(params.field_results["name"], data) ] #importpluginregistry.append( ImportPluginDateTime ) class QdpFile(object): """Handle reading of a Qdp file.""" def __init__(self, colnames): self.colmodes = {} self.skipmode = 'none' self.retndata = [] # store read in data here self.data = [] # index of max vector self.dataindex = 1 self.colnames = colnames # list of data groups for 2d objects self.datagroup2d = [] # axis ranges for 2d objects self.axis2d = [None, None] def handleRead(self, p): """Handle read command.""" try: mode = {'t': 'terr', 's': 'serr'}[p[1][:1]] except (IndexError, KeyError): raise ImportPluginException(_("read command takes terr/serr")) try: cols = [int(x) for x in p[2:]] except ValueError: raise ImportPluginException(_("read command takes list of columns separated by spaces")) for c in cols: self.colmodes[c] = mode def handleSkip(self, p): """Handle skip command.""" try: self.skipmode = {'o': 'off', 's': 'single', 'd': 'double'}[p[1][:1]] except (IndexError, KeyError): raise ImportPluginException(_("skip command takes single/double/off")) def handleNO(self, p, lastp): """Handle no command, meaning no data.""" if self.skipmode == 'none': self.addNans( len(p) ) elif self.skipmode == 'single': self.pushData() del self.data[:] self.dataindex += 1 elif self.skipmode == 'double': if lastp[0] == 'no': self.pushData() del self.data[:] self.dataindex += 1 else: self.addNans( len(p) ) def addNans(self, num): """Add a blank set of data to output.""" col = 0 ds = 0 while col < num or ds < len(self.data): if ds >= len(self.data): self.data.append([]) m = self.colmodes.get(ds+1) if m == 'serr': self.data[ds].append( (N.nan, N.nan) ) col += 2 elif m == 'terr': self.data[ds].append( (N.nan, N.nan, N.nan) ) col += 3 else: self.data[ds].append( N.nan ) col += 1 ds += 1 def pushData2D(self): """Handle 2D data groups.""" for num, r1, c1, r2, c2 in self.datagroup2d: arr = [] for c in crange(c1-1,c2-1+1): arr.append( self.data[c][r1-1:r2-1+1] ) # make data as "used" self.data[c] = None arr = N.array(arr) if num-1 < len(self.colnames): name = self.colnames[num-1] else: name = 'vec2d%i' % num rangex = rangey = None if self.axis2d[0] is not None: minval, pixsize = self.axis2d[0] rangex = (minval - pixsize*0.5, minval+(arr.shape[1]-0.5)*pixsize ) if self.axis2d[1] is not None: minval, pixsize = self.axis2d[1] rangey = (minval - pixsize*0.5, minval+(arr.shape[0]-0.5)*pixsize ) ds = datasetplugin.Dataset2D(name, data=arr, rangex=rangex, rangey=rangey) self.retndata.append(ds) def pushData(self): """Add data to output array. 
""" for i in crange(len(self.data)): if self.data[i] is None: continue # get dataset name if i < len(self.colnames): name = self.colnames[i] else: name = 'vec%i' % (i+1) if self.skipmode == 'single' or self.skipmode == 'double': name = name + '_' + str(self.dataindex) # convert data a = N.array(self.data[i]) if len(a.shape) == 1: # no error bars ds = datasetplugin.Dataset1D(name, data=a) elif a.shape[1] == 2: # serr ds = datasetplugin.Dataset1D(name, data=a[:,0], serr=a[:,1]) elif a.shape[1] == 3: # perr/nerr p = N.where(a[:,1] < a[:,2], a[:,2], a[:,1]) n = N.where(a[:,1] < a[:,2], a[:,1], a[:,2]) ds = datasetplugin.Dataset1D(name, data=a[:,0], perr=p, nerr=n) else: raise RuntimeError self.retndata.append(ds) def handleDataGroup(self, p): """Handle data groups.""" if len(p) == 3: # we don't support the renaming thing pass elif len(p) == 6: # 2d data try: pint = [int(x) for x in p[1:]] except ValueError: raise ImportPluginException(_("invalid 2d datagroup command")) self.datagroup2d.append(pint) def handleAxis(self, p): """Axis command gives range of axes (used for 2d).""" try: minval, maxval = float(p[2]), float(p[3]) except ValueError: raise ImportPluginException(_("invalid axis range")) self.axis2d[ p[0][0] == 'y' ] = (minval, maxval) def handleNum(self, p): """Handle set of numbers.""" nums = [] try: for n in p: if n.lower() == 'no': nums.append(N.nan) else: nums.append(float(n)) except ValueError: raise ImportPluginException(_("Cannot convert '%s' to numbers") % (' '.join(p))) col = 0 ds = 0 while col < len(nums): if ds >= len(self.data): self.data.append([]) m = self.colmodes.get(ds+1) if m == 'serr': self.data[ds].append( (nums[col], nums[col+1]) ) col += 2 elif m == 'terr': self.data[ds].append( (nums[col], nums[col+1], nums[col+2]) ) col += 3 else: self.data[ds].append( nums[col] ) col += 1 ds += 1 def importFile(self, fileobj, dirname): """Read data from file object. dirname is the directory in which the file is located """ contline = None lastp = [] for line in fileobj: # strip comments if line.find("!") >= 0: line = line[:line.find("!")] if line[:1] == '@': # read another file fname = os.path.join(dirname, line[1:].strip()) try: newf = open(fname) self.importFile(newf, dirname) except EnvironmentError: pass continue p = [x.lower() for x in line.split()] if contline: # add on previous continuation if existed p = contline + p contline = None if len(p) > 0 and p[-1][-1] == '-': # continuation p[-1] = p[-1][:-1] contline = p continue if len(p) == 0: # nothing continue v0 = p[0] if v0[0] in '0123456789-.': self.handleNum(p) elif v0 == 'no': self.handleNO(p, lastp) elif v0 == 'read': self.handleRead(p) elif v0[:2] == 'sk': self.handleSkip(p) elif v0[:2] == 'dg': self.handleDataGroup(p) elif v0[:1] == 'x' or v0[:2] == 'ya': self.handleAxis(p) else: # skip everything else (for now) pass lastp = p class ImportPluginQdp(ImportPlugin): """An example plugin for reading data from QDP files.""" name = "QDP import" author = "Jeremy Sanders" description = _("Reads datasets from QDP files") file_extensions = set(['.qdp']) def __init__(self): self.fields = [ field.FieldTextMulti("names", descr=_("Vector name list "), default=['']), ] def doImport(self, params): """Actually import data params is a ImportPluginParams object. 
Return a list of datasetplugin.Dataset1D, datasetplugin.Dataset2D objects """ names = [x.strip() for x in params.field_results["names"] if x.strip()] f = params.openFileWithEncoding() rqdp = QdpFile(names) rqdp.importFile(f, os.path.dirname(params.filename)) rqdp.pushData2D() rqdp.pushData() f.close() return rqdp.retndata def cnvtImportNumpyArray(name, val, errorsin2d=True): """Convert a numpy array to plugin returns.""" try: val.shape except AttributeError: raise ImportPluginException(_("Not the correct format file")) try: val + 0. val = val.astype(N.float64) except TypeError: raise ImportPluginException(_("Unsupported array type")) if val.ndim == 1: return datasetplugin.Dataset1D(name, val) elif val.ndim == 2: if errorsin2d and val.shape[1] in (2, 3): # return 1d array if val.shape[1] == 2: # use as symmetric errors return datasetplugin.Dataset1D(name, val[:,0], serr=val[:,1]) else: # asymmetric errors # unclear on ordering here... return datasetplugin.Dataset1D(name, val[:,0], perr=val[:,1], nerr=val[:,2]) else: return datasetplugin.Dataset2D(name, val) else: raise ImportPluginException(_("Unsupported dataset shape")) class ImportPluginNpy(ImportPlugin): """For reading single datasets from NPY numpy saved files.""" name = "Numpy NPY import" author = "Jeremy Sanders" description = _("Reads a 1D/2D numeric dataset from a Numpy NPY file") file_extensions = set(['.npy']) def __init__(self): self.fields = [ field.FieldText("name", descr=_("Dataset name"), default=''), field.FieldBool("errorsin2d", descr=_("Treat 2 and 3 column 2D arrays as\n" "data with error bars"), default=True), ] def getPreview(self, params): """Get data to show in a text box to show a preview. params is a ImportPluginParams object. Returns (text, okaytoimport) """ try: retn = N.load(params.filename) except Exception: return _("Cannot read file"), False try: text = _('Array shape: %s\n') % str(retn.shape) text += _('Array datatype: %s (%s)\n') % (retn.dtype.str, str(retn.dtype)) text += str(retn) return text, True except AttributeError: return _("Not an NPY file"), False def doImport(self, params): """Actually import data. """ name = params.field_results["name"].strip() if not name: raise ImportPluginException(_("Please provide a name for the dataset")) try: retn = N.load(params.filename) except Exception as e: raise ImportPluginException(_("Error while reading file: %s") % cstr(e)) return [ cnvtImportNumpyArray( name, retn, errorsin2d=params.field_results["errorsin2d"]) ] class ImportPluginNpz(ImportPlugin): """For reading single datasets from NPY numpy saved files.""" name = "Numpy NPZ import" author = "Jeremy Sanders" description = _("Reads datasets from a Numpy NPZ file.") file_extensions = set(['.npz']) def __init__(self): self.fields = [ field.FieldBool("errorsin2d", descr=_("Treat 2 and 3 column 2D arrays as\n" "data with error bars"), default=True), ] def getPreview(self, params): """Get data to show in a text box to show a preview. params is a ImportPluginParams object. Returns (text, okaytoimport) """ try: retn = N.load(params.filename) except Exception: return _("Cannot read file"), False # npz files should define this attribute try: retn.files except AttributeError: return _("Not an NPZ file"), False text = [] for f in sorted(retn.files): a = retn[f] text.append(_('Name: %s') % f) text.append(_(' Shape: %s') % str(a.shape)) text.append(_(' Datatype: %s (%s)') % (a.dtype.str, str(a.dtype))) text.append('') return '\n'.join(text), True def doImport(self, params): """Actually import data. 
""" try: retn = N.load(params.filename) except Exception as e: raise ImportPluginException(_("Error while reading file: %s") % cstr(e)) try: retn.files except AttributeError: raise ImportPluginException(_("File is not in NPZ format")) # convert each of the imported arrays out = [] for f in sorted(retn.files): out.append( cnvtImportNumpyArray( f, retn[f], errorsin2d=params.field_results["errorsin2d"]) ) return out class ImportPluginBinary(ImportPlugin): name = "Binary import" author = "Jeremy Sanders" description = _("Reads numerical binary files.") file_extensions = set(['.bin']) def __init__(self): self.fields = [ field.FieldText("name", descr=_("Dataset name"), default=""), field.FieldCombo("datatype", descr=_("Data type"), items = ("float32", "float64", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"), default="float64", editable=False), field.FieldCombo("endian", descr=_("Endian (byte order)"), items = ("little", "big"), editable=False), field.FieldInt("offset", descr=_("Offset (bytes)"), default=0, minval=0), field.FieldInt("length", descr=_("Length (values)"), default=-1) ] def getNumpyDataType(self, params): """Convert params to numpy datatype.""" t = N.dtype(str(params.field_results["datatype"])) return t.newbyteorder( {"little": "<", "big": ">"} [ params.field_results["endian"]] ) def getPreview(self, params): """Preview of data files.""" try: f = open(params.filename, "rb") data = f.read() f.close() except EnvironmentError as e: return _("Cannot read file (%s)") % cstrerror(e), False text = [_('File length: %i bytes') % len(data)] def filtchr(c): """Filtered character to ascii range.""" if ord(c) <= 32 or ord(c) > 127: return '.' else: return c # do a hex dump (like in CP/M) for i in crange(0, min(65536, len(data)), 16): hdr = '%04X ' % i subset = data[i:i+16] hexdata = ('%02X '*len(subset)) % tuple([ord(x) for x in subset]) chrdata = ''.join([filtchr(c) for c in subset]) text.append(hdr+hexdata + ' ' + chrdata) return '\n'.join(text), True def doImport(self, params): """Import the data.""" name = params.field_results["name"].strip() if not name: raise ImportPluginException(_("Please provide a name for the dataset")) try: f = open(params.filename, "rb") f.seek( params.field_results["offset"] ) retn = f.read() f.close() except EnvironmentError as e: raise ImportPluginException(_("Error while reading file '%s'\n\n%s") % (params.filename, cstrerror(e))) try: data = N.fromstring(retn, dtype=self.getNumpyDataType(params), count=params.field_results["length"]) except ValueError as e: raise ImportPluginException(_("Error converting data for file '%s'\n\n%s") % (params.filename, cstr(e))) data = data.astype(N.float64) return [ datasetplugin.Dataset1D(name, data) ] class ImportPluginGnuplot2D(ImportPlugin): """A Veusz plugin for reading data in Gnuplot 2D data format from a file.""" name = "Gnuplot 2D data import plugin" author = "Joerg Meyer, j.meyer@chem.leidenuniv.nl" description = "Reads data in Gnuplot 2D format from a text file." file_extensions = set(['.data','.elbow']) def __init__(self): ImportPlugin.__init__(self) self.fields = [ field.FieldText( "name", descr="Dataset name", default="name"), field.FieldFloat( "subtract", descr="Offset to subtract", default=0.0), field.FieldFloat( "mult", descr="Multiplication factor", default=1), ] def doImport(self, params): """Actually import data params is a ImportPluginParams object. 
Return a list of ImportDataset1D, ImportDataset2D objects """ sub = float(params.field_results["subtract"]) mult = params.field_results["mult"] f = params.openFileWithEncoding() data_gp = [] data_gp_block = [] for line in f: fields = line.split() if not fields: if data_gp_block: data_gp.append( data_gp_block ) data_gp_block = [] else: # ignore initial blank lines continue elif '#' in fields[0]: # ignore comment lines continue else: x,y,z = map(float, fields[0:3]) data_gp_block.append( [x,y,(z-sub)*mult] ) if data_gp_block: # append last block if necessary data_gp.append( data_gp_block ) data_gp_block = [] data = N.array(data_gp) S = data.shape data_for_sorting = data.reshape((S[0]*S[1],S[2])) ind = N.lexsort( [data_for_sorting[:,0], data_for_sorting[:,1]] ) data_veusz = data_for_sorting[ind].reshape(S)[:,:,2] rangex = (data[:,:,0].min(),data[:,:,0].max()) rangey = (data[:,:,1].min(),data[:,:,1].max()) return [ datasetplugin.Dataset2D( params.field_results["name"], data=data_veusz, rangex=rangex, rangey=rangey) ] importpluginregistry += [ ImportPluginNpy, ImportPluginNpz, ImportPluginQdp, ImportPluginBinary, ImportPluginExample, ImportPluginGnuplot2D, ]
gpl-2.0
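The plugin module in the row above turns several on-disk formats (QDP, NPY/NPZ, raw binary, Gnuplot 2D) into 1-D or 2-D datasets; its ImportPluginBinary class reduces raw-binary import to choosing a dtype, a byte order, an offset, and a length. The sketch below isolates just that NumPy step outside the Veusz plugin framework; the function name, the example file name, and the default arguments are illustrative assumptions, not part of the Veusz API.

import numpy as np

def read_binary_values(path, datatype="float64", endian="little", offset=0, length=-1):
    # Build a dtype with the requested byte order, then read a flat block of numbers.
    dtype = np.dtype(datatype).newbyteorder("<" if endian == "little" else ">")
    with open(path, "rb") as f:
        f.seek(offset)                            # skip any leading header bytes
        count = length if length >= 0 else -1     # -1 tells NumPy to read to end of file
        data = np.fromfile(f, dtype=dtype, count=count)
    # Return float64 values, mirroring how the plugin hands data to a 1-D dataset.
    return data.astype(np.float64)

# values = read_binary_values("spectrum.bin", datatype="float32", endian="big", offset=16)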
vinayak-mehta/scikit-learn
sklearn/metrics/cluster/_supervised.py
8
41212
"""Utilities to evaluate the clustering performance of models. Functions named as *_score return a scalar value to maximize: the higher the better. """ # Authors: Olivier Grisel <olivier.grisel@ensta.org> # Wei LI <kuantkid@gmail.com> # Diego Molla <dmolla-aliod@gmail.com> # Arnaud Fouchet <foucheta@gmail.com> # Thierry Guillemot <thierry.guillemot.work@gmail.com> # Gregory Stupp <stuppie@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Arya McCarthy <arya@jhu.edu> # Uwe F Mayer <uwe_f_mayer@yahoo.com> # License: BSD 3 clause import warnings from math import log import numpy as np from scipy import sparse as sp from ._expected_mutual_info_fast import expected_mutual_information from ...utils.multiclass import type_of_target from ...utils.validation import check_array, check_consistent_length def check_clusterings(labels_true, labels_pred): """Check that the labels arrays are 1D and of same dimension. Parameters ---------- labels_true : array-like of shape (n_samples,) The true labels. labels_pred : array-like of shape (n_samples,) The predicted labels. """ labels_true = check_array( labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None, ) labels_pred = check_array( labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None, ) type_label = type_of_target(labels_true) type_pred = type_of_target(labels_pred) if "continuous" in (type_pred, type_label): msg = ( "Clustering metrics expects discrete values but received" f" {type_label} values for label, and {type_pred} values " "for target" ) warnings.warn(msg, UserWarning) # input checks if labels_true.ndim != 1: raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,)) if labels_pred.ndim != 1: raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,)) check_consistent_length(labels_true, labels_pred) return labels_true, labels_pred def _generalized_average(U, V, average_method): """Return a particular mean of two numbers.""" if average_method == "min": return min(U, V) elif average_method == "geometric": return np.sqrt(U * V) elif average_method == "arithmetic": return np.mean([U, V]) elif average_method == "max": return max(U, V) else: raise ValueError( "'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'" ) def contingency_matrix( labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64 ): """Build a contingency matrix describing the relationship between labels. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. eps : float, default=None If a float, that value is added to all values in the contingency matrix. This helps to stop NaN propagation. If ``None``, nothing is adjusted. sparse : bool, default=False If `True`, return a sparse CSR continency matrix. If `eps` is not `None` and `sparse` is `True` will raise ValueError. .. versionadded:: 0.18 dtype : numeric type, default=np.int64 Output dtype. Ignored if `eps` is not `None`. .. versionadded:: 0.24 Returns ------- contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred] Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in true class :math:`i` and in predicted class :math:`j`. If ``eps is None``, the dtype of this array will be integer unless set otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype will be float. Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``. 
""" if eps is not None and sparse: raise ValueError("Cannot set 'eps' when sparse=True") classes, class_idx = np.unique(labels_true, return_inverse=True) clusters, cluster_idx = np.unique(labels_pred, return_inverse=True) n_classes = classes.shape[0] n_clusters = clusters.shape[0] # Using coo_matrix to accelerate simple histogram calculation, # i.e. bins are consecutive integers # Currently, coo_matrix is faster than histogram2d for simple cases contingency = sp.coo_matrix( (np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=dtype, ) if sparse: contingency = contingency.tocsr() contingency.sum_duplicates() else: contingency = contingency.toarray() if eps is not None: # don't use += as contingency is integer contingency = contingency + eps return contingency # clustering measures def pair_confusion_matrix(labels_true, labels_pred): """Pair confusion matrix arising from two clusterings [1]_. The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix between two clusterings by considering all pairs of samples and counting pairs that are assigned into the same or into different clusters under the true and predicted clusterings. Considering a pair of samples that is clustered together a positive pair, then as in binary classification the count of true negatives is :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is :math:`C_{11}` and false positives is :math:`C_{01}`. Read more in the :ref:`User Guide <pair_confusion_matrix>`. Parameters ---------- labels_true : array-like of shape (n_samples,), dtype=integral Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,), dtype=integral Cluster labels to evaluate. Returns ------- C : ndarray of shape (2, 2), dtype=np.int64 The contingency matrix. See Also -------- rand_score: Rand Score. adjusted_rand_score: Adjusted Rand Score. adjusted_mutual_info_score: Adjusted Mutual Information. References ---------- .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions." Journal of Classification 2, 193–218 (1985). <10.1007/BF01908075>` Examples -------- Perfectly matching labelings have all non-zero entries on the diagonal regardless of actual label values: >>> from sklearn.metrics.cluster import pair_confusion_matrix >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0]) array([[8, 0], [0, 4]]... Labelings that assign all classes members to the same clusters are complete but may be not always pure, hence penalized, and have some off-diagonal non-zero entries: >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1]) array([[8, 2], [0, 2]]... Note that the matrix is not symmetric. """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) n_samples = np.int64(labels_true.shape[0]) # Computation using the contingency data contingency = contingency_matrix( labels_true, labels_pred, sparse=True, dtype=np.int64 ) n_c = np.ravel(contingency.sum(axis=1)) n_k = np.ravel(contingency.sum(axis=0)) sum_squares = (contingency.data**2).sum() C = np.empty((2, 2), dtype=np.int64) C[1, 1] = sum_squares - n_samples C[0, 1] = contingency.dot(n_k).sum() - sum_squares C[1, 0] = contingency.transpose().dot(n_c).sum() - sum_squares C[0, 0] = n_samples**2 - C[0, 1] - C[1, 0] - sum_squares return C def rand_score(labels_true, labels_pred): """Rand index. 
The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings [1]_ [2]_. The raw RI score [3]_ is: RI = (number of agreeing pairs) / (number of pairs) Read more in the :ref:`User Guide <rand_score>`. Parameters ---------- labels_true : array-like of shape (n_samples,), dtype=integral Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,), dtype=integral Cluster labels to evaluate. Returns ------- RI : float Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for perfect match. See Also -------- adjusted_rand_score: Adjusted Rand Score. adjusted_mutual_info_score: Adjusted Mutual Information. References ---------- .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions." Journal of Classification 2, 193–218 (1985). <10.1007/BF01908075>`. .. [2] `Wikipedia: Simple Matching Coefficient <https://en.wikipedia.org/wiki/Simple_matching_coefficient>`_ .. [3] `Wikipedia: Rand Index <https://en.wikipedia.org/wiki/Rand_index>`_ Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import rand_score >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized: >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.83... """ contingency = pair_confusion_matrix(labels_true, labels_pred) numerator = contingency.diagonal().sum() denominator = contingency.sum() if numerator == denominator or denominator == 0: # Special limit cases: no clustering since the data is not split; # or trivial clustering where each document is assigned a unique # cluster. These are perfect matches hence return 1.0. return 1.0 return numerator / denominator def adjusted_rand_score(labels_true, labels_pred): """Rand index adjusted for chance. The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings. The raw RI score is then "adjusted for chance" into the ARI score using the following scheme:: ARI = (RI - Expected_RI) / (max(RI) - Expected_RI) The adjusted Rand index is thus ensured to have a value close to 0.0 for random labeling independently of the number of clusters and samples and exactly 1.0 when the clusterings are identical (up to a permutation). The adjusted Rand index is bounded below by -0.5 for especially discordant clusterings. ARI is a symmetric measure:: adjusted_rand_score(a, b) == adjusted_rand_score(b, a) Read more in the :ref:`User Guide <adjusted_rand_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- ARI : float Similarity score between -0.5 and 1.0. Random labelings have an ARI close to 0.0. 1.0 stands for perfect match. See Also -------- adjusted_mutual_info_score : Adjusted Mutual Information. References ---------- .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions, Journal of Classification 1985 https://link.springer.com/article/10.1007%2FBF01908075 .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie adjusted Rand index, Psychological Methods 2004 .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index .. 
[Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size, 2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>` Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import adjusted_rand_score >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized:: >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.57... ARI is symmetric, so labelings that have pure clusters with members coming from the same classes but unnecessary splits are penalized:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) 0.57... If classes members are completely split across different clusters, the assignment is totally incomplete, hence the ARI is very low:: >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3]) 0.0 ARI may take a negative value for especially discordant labelings that are a worse choice than the expected value of random labels:: >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]) -0.5 """ (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred) # convert to Python integer types, to avoid overflow or underflow tn, fp, fn, tp = int(tn), int(fp), int(fn), int(tp) # Special cases: empty data or full agreement if fn == 0 and fp == 0: return 1.0 return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn)) def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0): """Compute the homogeneity and completeness and V-Measure scores at once. Those metrics are based on normalized conditional entropy measures of the clustering labeling to evaluate given the knowledge of a Ground Truth class labels of the same samples. A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class. A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster. Both scores have positive values between 0.0 and 1.0, larger values being desirable. Those 3 metrics are independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score values in any way. V-Measure is furthermore symmetric: swapping ``labels_true`` and ``label_pred`` will give the same score. This does not hold for homogeneity and completeness. V-Measure is identical to :func:`normalized_mutual_info_score` with the arithmetic averaging method. Read more in the :ref:`User Guide <homogeneity_completeness>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Gluster labels to evaluate. beta : float, default=1.0 Ratio of weight attributed to ``homogeneity`` vs ``completeness``. If ``beta`` is greater than 1, ``completeness`` is weighted more strongly in the calculation. If ``beta`` is less than 1, ``homogeneity`` is weighted more strongly. Returns ------- homogeneity : float Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. completeness : float Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. v_measure : float Harmonic mean of the first two. See Also -------- homogeneity_score : Homogeneity metric of cluster labeling. completeness_score : Completeness metric of cluster labeling. 
v_measure_score : V-Measure (NMI with arithmetic mean option). """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) if len(labels_true) == 0: return 1.0, 1.0, 1.0 entropy_C = entropy(labels_true) entropy_K = entropy(labels_pred) contingency = contingency_matrix(labels_true, labels_pred, sparse=True) MI = mutual_info_score(None, None, contingency=contingency) homogeneity = MI / (entropy_C) if entropy_C else 1.0 completeness = MI / (entropy_K) if entropy_K else 1.0 if homogeneity + completeness == 0.0: v_measure_score = 0.0 else: v_measure_score = ( (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness) ) return homogeneity, completeness, v_measure_score def homogeneity_score(labels_true, labels_pred): """Homogeneity metric of a cluster labeling given a ground truth. A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class. This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is not symmetric: switching ``label_true`` with ``label_pred`` will return the :func:`completeness_score` which will be different in general. Read more in the :ref:`User Guide <homogeneity_completeness>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- homogeneity : float Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. See Also -------- completeness_score : Completeness metric of cluster labeling. v_measure_score : V-Measure (NMI with arithmetic mean option). References ---------- .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A conditional entropy-based external cluster evaluation measure <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_ Examples -------- Perfect labelings are homogeneous:: >>> from sklearn.metrics.cluster import homogeneity_score >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Non-perfect labelings that further split classes into more clusters can be perfectly homogeneous:: >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2])) 1.000000 >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3])) 1.000000 Clusters that include samples from different classes do not make for an homogeneous labeling:: >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1])) 0.0... >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0])) 0.0... """ return homogeneity_completeness_v_measure(labels_true, labels_pred)[0] def completeness_score(labels_true, labels_pred): """Compute completeness metric of a cluster labeling given a ground truth. A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster. This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is not symmetric: switching ``label_true`` with ``label_pred`` will return the :func:`homogeneity_score` which will be different in general. Read more in the :ref:`User Guide <homogeneity_completeness>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. 
Returns ------- completeness : float Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. See Also -------- homogeneity_score : Homogeneity metric of cluster labeling. v_measure_score : V-Measure (NMI with arithmetic mean option). References ---------- .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A conditional entropy-based external cluster evaluation measure <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_ Examples -------- Perfect labelings are complete:: >>> from sklearn.metrics.cluster import completeness_score >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Non-perfect labelings that assign all classes members to the same clusters are still complete:: >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0])) 1.0 >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1])) 0.999... If classes members are split across different clusters, the assignment cannot be complete:: >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1])) 0.0 >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3])) 0.0 """ return homogeneity_completeness_v_measure(labels_true, labels_pred)[1] def v_measure_score(labels_true, labels_pred, *, beta=1.0): """V-measure cluster labeling given a ground truth. This score is identical to :func:`normalized_mutual_info_score` with the ``'arithmetic'`` option for averaging. The V-measure is the harmonic mean between homogeneity and completeness:: v = (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness) This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching ``label_true`` with ``label_pred`` will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Read more in the :ref:`User Guide <homogeneity_completeness>`. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. beta : float, default=1.0 Ratio of weight attributed to ``homogeneity`` vs ``completeness``. If ``beta`` is greater than 1, ``completeness`` is weighted more strongly in the calculation. If ``beta`` is less than 1, ``homogeneity`` is weighted more strongly. Returns ------- v_measure : float Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling. See Also -------- homogeneity_score : Homogeneity metric of cluster labeling. completeness_score : Completeness metric of cluster labeling. normalized_mutual_info_score : Normalized Mutual Information. References ---------- .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A conditional entropy-based external cluster evaluation measure <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_ Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import v_measure_score >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but not homogeneous, hence penalized:: >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1])) 0.8... >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1])) 0.66... 
Labelings that have pure clusters with members coming from the same classes are homogeneous but un-necessary splits harm completeness and thus penalize V-measure as well:: >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2])) 0.8... >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3])) 0.66... If classes members are completely split across different clusters, the assignment is totally incomplete, hence the V-Measure is null:: >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3])) 0.0... Clusters that include samples from totally different classes totally destroy the homogeneity of the labeling, hence:: >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0])) 0.0... """ return homogeneity_completeness_v_measure(labels_true, labels_pred, beta=beta)[2] def mutual_info_score(labels_true, labels_pred, *, contingency=None): """Mutual Information between two clusterings. The Mutual Information is a measure of the similarity between two labels of the same data. Where :math:`|U_i|` is the number of the samples in cluster :math:`U_i` and :math:`|V_j|` is the number of the samples in cluster :math:`V_j`, the Mutual Information between clusterings :math:`U` and :math:`V` is given as: .. math:: MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N} \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|} This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching :math:`U` (i.e ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Read more in the :ref:`User Guide <mutual_info_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets, called :math:`U` in the above formula. labels_pred : int array-like of shape (n_samples,) A clustering of the data into disjoint subsets, called :math:`V` in the above formula. contingency : {ndarray, sparse matrix} of shape \ (n_classes_true, n_classes_pred), default=None A contingency matrix given by the :func:`contingency_matrix` function. If value is ``None``, it will be computed, otherwise the given value is used, with ``labels_true`` and ``labels_pred`` ignored. Returns ------- mi : float Mutual information, a non-negative value, measured in nats using the natural logarithm. See Also -------- adjusted_mutual_info_score : Adjusted against chance Mutual Information. normalized_mutual_info_score : Normalized Mutual Information. Notes ----- The logarithm used is the natural logarithm (base-e). 
""" if contingency is None: labels_true, labels_pred = check_clusterings(labels_true, labels_pred) contingency = contingency_matrix(labels_true, labels_pred, sparse=True) else: contingency = check_array( contingency, accept_sparse=["csr", "csc", "coo"], dtype=[int, np.int32, np.int64], ) if isinstance(contingency, np.ndarray): # For an array nzx, nzy = np.nonzero(contingency) nz_val = contingency[nzx, nzy] elif sp.issparse(contingency): # For a sparse matrix nzx, nzy, nz_val = sp.find(contingency) else: raise ValueError("Unsupported type for 'contingency': %s" % type(contingency)) contingency_sum = contingency.sum() pi = np.ravel(contingency.sum(axis=1)) pj = np.ravel(contingency.sum(axis=0)) # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a # single cluster, implies MI = 0 if pi.size == 1 or pj.size == 1: return 0.0 log_contingency_nm = np.log(nz_val) contingency_nm = nz_val / contingency_sum # Don't need to calculate the full outer product, just for non-zeroes outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype( np.int64, copy=False ) log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum()) mi = ( contingency_nm * (log_contingency_nm - log(contingency_sum)) + contingency_nm * log_outer ) mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi) return np.clip(mi.sum(), 0.0, None) def adjusted_mutual_info_score( labels_true, labels_pred, *, average_method="arithmetic" ): """Adjusted Mutual Information between two clusterings. Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information (MI) score to account for chance. It accounts for the fact that the MI is generally higher for two clusterings with a larger number of clusters, regardless of whether there is actually more information shared. For two clusterings :math:`U` and :math:`V`, the AMI is given as:: AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))] This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching :math:`U` (``label_true``) with :math:`V` (``labels_pred``) will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Be mindful that this function is an order of magnitude slower than other metrics, such as the Adjusted Rand Index. Read more in the :ref:`User Guide <mutual_info_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets, called :math:`U` in the above formula. labels_pred : int array-like of shape (n_samples,) A clustering of the data into disjoint subsets, called :math:`V` in the above formula. average_method : str, default='arithmetic' How to compute the normalizer in the denominator. Possible options are 'min', 'geometric', 'arithmetic', and 'max'. .. versionadded:: 0.20 .. versionchanged:: 0.22 The default value of ``average_method`` changed from 'max' to 'arithmetic'. Returns ------- ami: float (upperlimited by 1.0) The AMI returns a value of 1 when the two partitions are identical (ie perfectly matched). Random partitions (independent labellings) have an expected AMI around 0 on average hence can be negative. The value is in adjusted nats (based on the natural logarithm). See Also -------- adjusted_rand_score : Adjusted Rand Index. 
mutual_info_score : Mutual Information (not adjusted for chance). References ---------- .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance, JMLR <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_ .. [2] `Wikipedia entry for the Adjusted Mutual Information <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_ Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import adjusted_mutual_info_score >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) ... # doctest: +SKIP 1.0 >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) ... # doctest: +SKIP 1.0 If classes members are completely split across different clusters, the assignment is totally in-complete, hence the AMI is null:: >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) ... # doctest: +SKIP 0.0 """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) n_samples = labels_true.shape[0] classes = np.unique(labels_true) clusters = np.unique(labels_pred) # Special limit cases: no clustering since the data is not split. # It corresponds to both labellings having zero entropy. # This is a perfect match hence return 1.0. if ( classes.shape[0] == clusters.shape[0] == 1 or classes.shape[0] == clusters.shape[0] == 0 ): return 1.0 contingency = contingency_matrix(labels_true, labels_pred, sparse=True) contingency = contingency.astype(np.float64, copy=False) # Calculate the MI for the two clusterings mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) # Calculate the expected value for the mutual information emi = expected_mutual_information(contingency, n_samples) # Calculate entropy for each labeling h_true, h_pred = entropy(labels_true), entropy(labels_pred) normalizer = _generalized_average(h_true, h_pred, average_method) denominator = normalizer - emi # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match. # normalizer should always be >= emi, but because of floating-point # representation, sometimes emi is slightly larger. Correct this # by preserving the sign. if denominator < 0: denominator = min(denominator, -np.finfo("float64").eps) else: denominator = max(denominator, np.finfo("float64").eps) ami = (mi - emi) / denominator return ami def normalized_mutual_info_score( labels_true, labels_pred, *, average_method="arithmetic" ): """Normalized Mutual Information between two clusterings. Normalized Mutual Information (NMI) is a normalization of the Mutual Information (MI) score to scale the results between 0 (no mutual information) and 1 (perfect correlation). In this function, mutual information is normalized by some generalized mean of ``H(labels_true)`` and ``H(labels_pred))``, defined by the `average_method`. This measure is not adjusted for chance. Therefore :func:`adjusted_mutual_info_score` might be preferred. This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching ``label_true`` with ``label_pred`` will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Read more in the :ref:`User Guide <mutual_info_score>`. 
Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets. labels_pred : int array-like of shape (n_samples,) A clustering of the data into disjoint subsets. average_method : str, default='arithmetic' How to compute the normalizer in the denominator. Possible options are 'min', 'geometric', 'arithmetic', and 'max'. .. versionadded:: 0.20 .. versionchanged:: 0.22 The default value of ``average_method`` changed from 'geometric' to 'arithmetic'. Returns ------- nmi : float Score between 0.0 and 1.0 in normalized nats (based on the natural logarithm). 1.0 stands for perfectly complete labeling. See Also -------- v_measure_score : V-Measure (NMI with arithmetic mean option). adjusted_rand_score : Adjusted Rand Index. adjusted_mutual_info_score : Adjusted Mutual Information (adjusted against chance). Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import normalized_mutual_info_score >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) ... # doctest: +SKIP 1.0 >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) ... # doctest: +SKIP 1.0 If classes members are completely split across different clusters, the assignment is totally in-complete, hence the NMI is null:: >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) ... # doctest: +SKIP 0.0 """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) classes = np.unique(labels_true) clusters = np.unique(labels_pred) # Special limit cases: no clustering since the data is not split. # It corresponds to both labellings having zero entropy. # This is a perfect match hence return 1.0. if ( classes.shape[0] == clusters.shape[0] == 1 or classes.shape[0] == clusters.shape[0] == 0 ): return 1.0 contingency = contingency_matrix(labels_true, labels_pred, sparse=True) contingency = contingency.astype(np.float64, copy=False) # Calculate the MI for the two clusterings mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) # At this point mi = 0 can't be a perfect match (the special case of a single # cluster has been dealt with before). Hence, if mi = 0, the nmi must be 0 whatever # the normalization. if mi == 0: return 0.0 # Calculate entropy for each labeling h_true, h_pred = entropy(labels_true), entropy(labels_pred) normalizer = _generalized_average(h_true, h_pred, average_method) return mi / normalizer def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False): """Measure the similarity of two clusterings of a set of points. .. versionadded:: 0.18 The Fowlkes-Mallows index (FMI) is defined as the geometric mean between of the precision and recall:: FMI = TP / sqrt((TP + FP) * (TP + FN)) Where ``TP`` is the number of **True Positive** (i.e. the number of pair of points that belongs in the same clusters in both ``labels_true`` and ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the number of pair of points that belongs in the same clusters in ``labels_true`` and not in ``labels_pred``) and ``FN`` is the number of **False Negative** (i.e the number of pair of points that belongs in the same clusters in ``labels_pred`` and not in ``labels_True``). The score ranges from 0 to 1. A high value indicates a good similarity between two clusters. Read more in the :ref:`User Guide <fowlkes_mallows_scores>`. Parameters ---------- labels_true : int array, shape = (``n_samples``,) A clustering of the data into disjoint subsets. 
labels_pred : array, shape = (``n_samples``, ) A clustering of the data into disjoint subsets. sparse : bool, default=False Compute contingency matrix internally with sparse matrix. Returns ------- score : float The resulting Fowlkes-Mallows score. References ---------- .. [1] `E. B. Fowkles and C. L. Mallows, 1983. "A method for comparing two hierarchical clusterings". Journal of the American Statistical Association <https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_ .. [2] `Wikipedia entry for the Fowlkes-Mallows Index <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_ Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import fowlkes_mallows_score >>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1]) 1.0 >>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 If classes members are completely split across different clusters, the assignment is totally random, hence the FMI is null:: >>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3]) 0.0 """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) (n_samples,) = labels_true.shape c = contingency_matrix(labels_true, labels_pred, sparse=True) c = c.astype(np.int64, copy=False) tk = np.dot(c.data, c.data) - n_samples pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples return np.sqrt(tk / pk) * np.sqrt(tk / qk) if tk != 0.0 else 0.0 def entropy(labels): """Calculate the entropy for a labeling. Parameters ---------- labels : array-like of shape (n_samples,), dtype=int The labels. Returns ------- entropy : float The entropy for a labeling. Notes ----- The logarithm used is the natural logarithm (base-e). """ if len(labels) == 0: return 1.0 label_idx = np.unique(labels, return_inverse=True)[1] pi = np.bincount(label_idx).astype(np.float64) pi = pi[pi > 0] # single cluster => zero entropy if pi.size == 1: return 0.0 pi_sum = np.sum(pi) # log(a / b) should be calculated as log(a) - log(b) for # possible loss of precision return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
bsd-3-clause
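The record above documents scikit-learn's supervised clustering metrics (MI, AMI, NMI, Fowlkes-Mallows). As a quick illustration that is not part of the record itself, here is a minimal sketch exercising the public `sklearn.metrics` functions on the toy labelings used in the docstrings, checking the documented limit cases:

```python
from sklearn.metrics import (
    mutual_info_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
    fowlkes_mallows_score,
)

labels_true = [0, 0, 1, 1]

# Identical partitions (up to a label permutation): AMI/NMI/FMI all reach 1.0,
# while the raw MI is reported in nats (ln 2 ~= 0.693 here).
for labels_pred in ([0, 0, 1, 1], [1, 1, 0, 0]):
    print(
        mutual_info_score(labels_true, labels_pred),            # ~0.693
        adjusted_mutual_info_score(labels_true, labels_pred),   # 1.0
        normalized_mutual_info_score(labels_true, labels_pred), # 1.0
        fowlkes_mallows_score(labels_true, labels_pred),        # 1.0
    )

# One cluster per sample against a single true class: the scores drop to 0.
print(adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]))    # 0.0
print(normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]))  # 0.0
print(fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3]))         # 0.0
```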
jwlawson/tensorflow
tensorflow/examples/learn/hdf5_classification.py
74
2899
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of DNNClassifier for Iris plant dataset, hdf5 format.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf import h5py # pylint: disable=g-bad-import-order X_FEATURE = 'x' # Name of the input feature. def main(unused_argv): # Load dataset. iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) # Note that we are saving and load iris data as h5 format as a simple # demonstration here. h5f = h5py.File('/tmp/test_hdf5.h5', 'w') h5f.create_dataset('X_train', data=x_train) h5f.create_dataset('X_test', data=x_test) h5f.create_dataset('y_train', data=y_train) h5f.create_dataset('y_test', data=y_test) h5f.close() h5f = h5py.File('/tmp/test_hdf5.h5', 'r') x_train = np.array(h5f['X_train']) x_test = np.array(h5f['X_test']) y_train = np.array(h5f['y_train']) y_test = np.array(h5f['y_test']) # Build 3 layer DNN with 10, 20, 10 units respectively. feature_columns = [ tf.feature_column.numeric_column( X_FEATURE, shape=np.array(x_train).shape[1:])] classifier = tf.estimator.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3) # Train. train_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=200) # Predict. test_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class_ids'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. scores = classifier.evaluate(input_fn=test_input_fn) print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy'])) if __name__ == '__main__': tf.app.run()
apache-2.0
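The estimator half of the example above relies on the long-removed `tf.estimator.inputs` API; the HDF5 round-trip it demonstrates can be sketched on its own with just `h5py` and scikit-learn. The temporary file path below is an arbitrary choice:

```python
import numpy as np
import h5py
from sklearn import datasets, model_selection

iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)

# Round-trip the training split through an HDF5 file, as the example does
# before handing the arrays to the classifier.
with h5py.File('/tmp/iris_roundtrip.h5', 'w') as h5f:
    h5f.create_dataset('X_train', data=x_train)
    h5f.create_dataset('y_train', data=y_train)

with h5py.File('/tmp/iris_roundtrip.h5', 'r') as h5f:
    x_loaded = h5f['X_train'][:]
    y_loaded = h5f['y_train'][:]

assert np.array_equal(x_train, x_loaded) and np.array_equal(y_train, y_loaded)
```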
vinayak-mehta/scikit-learn
sklearn/decomposition/_incremental_pca.py
7
15744
"""Incremental Principal Components Analysis.""" # Author: Kyle Kastner <kastnerkyle@gmail.com> # Giorgio Patrini # License: BSD 3 clause from numbers import Integral import numpy as np from scipy import linalg, sparse from ._base import _BasePCA from ..utils import gen_batches from ..utils._param_validation import Interval from ..utils.extmath import svd_flip, _incremental_mean_and_var class IncrementalPCA(_BasePCA): """Incremental principal components analysis (IPCA). Linear dimensionality reduction using Singular Value Decomposition of the data, keeping only the most significant singular vectors to project the data to a lower dimensional space. The input data is centered but not scaled for each feature before applying the SVD. Depending on the size of the input data, this algorithm can be much more memory efficient than a PCA, and allows sparse input. This algorithm has constant memory complexity, on the order of ``batch_size * n_features``, enabling use of np.memmap files without loading the entire file into memory. For sparse matrices, the input is converted to dense in batches (in order to be able to subtract the mean) which avoids storing the entire dense matrix at any one time. The computational overhead of each SVD is ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples remain in memory at a time. There will be ``n_samples / batch_size`` SVD computations to get the principal components, versus 1 large SVD of complexity ``O(n_samples * n_features ** 2)`` for PCA. Read more in the :ref:`User Guide <IncrementalPCA>`. .. versionadded:: 0.16 Parameters ---------- n_components : int, default=None Number of components to keep. If ``n_components`` is ``None``, then ``n_components`` is set to ``min(n_samples, n_features)``. whiten : bool, default=False When True (False by default) the ``components_`` vectors are divided by ``n_samples`` times ``components_`` to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometimes improve the predictive accuracy of the downstream estimators by making data respect some hard-wired assumptions. copy : bool, default=True If False, X will be overwritten. ``copy=False`` can be used to save memory but is unsafe for general use. batch_size : int, default=None The number of samples to use for each batch. Only used when calling ``fit``. If ``batch_size`` is ``None``, then ``batch_size`` is inferred from the data and set to ``5 * n_features``, to provide a balance between approximation accuracy and memory consumption. Attributes ---------- components_ : ndarray of shape (n_components, n_features) Principal axes in feature space, representing the directions of maximum variance in the data. Equivalently, the right singular vectors of the centered input data, parallel to its eigenvectors. The components are sorted by decreasing ``explained_variance_``. explained_variance_ : ndarray of shape (n_components,) Variance explained by each of the selected components. explained_variance_ratio_ : ndarray of shape (n_components,) Percentage of variance explained by each of the selected components. If all components are stored, the sum of explained variances is equal to 1.0. singular_values_ : ndarray of shape (n_components,) The singular values corresponding to each of the selected components. The singular values are equal to the 2-norms of the ``n_components`` variables in the lower-dimensional space. 
mean_ : ndarray of shape (n_features,) Per-feature empirical mean, aggregate over calls to ``partial_fit``. var_ : ndarray of shape (n_features,) Per-feature empirical variance, aggregate over calls to ``partial_fit``. noise_variance_ : float The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. n_components_ : int The estimated number of components. Relevant when ``n_components=None``. n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. batch_size_ : int Inferred batch size from ``batch_size``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- PCA : Principal component analysis (PCA). KernelPCA : Kernel Principal component analysis (KPCA). SparsePCA : Sparse Principal Components Analysis (SparsePCA). TruncatedSVD : Dimensionality reduction using truncated SVD. Notes ----- Implements the incremental PCA model from: *D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, pp. 125-141, May 2008.* See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf This model is an extension of the Sequential Karhunen-Loeve Transform from: :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and its Application to Images, IEEE Transactions on Image Processing, Volume 9, Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>` We have specifically abstained from an optimization used by authors of both papers, a QR decomposition used in specific situations to reduce the algorithmic complexity of the SVD. The source for this technique is *Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5, section 5.4.4, pp 252-253.*. This technique has been omitted because it is advantageous only when decomposing a matrix with ``n_samples`` (rows) >= 5/3 * ``n_features`` (columns), and hurts the readability of the implemented algorithm. This would be a good opportunity for future optimization, if it is deemed necessary. References ---------- D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3, pp. 125-141, May 2008. G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5, Section 5.4.4, pp. 252-253. 
Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.decomposition import IncrementalPCA >>> from scipy import sparse >>> X, _ = load_digits(return_X_y=True) >>> transformer = IncrementalPCA(n_components=7, batch_size=200) >>> # either partially fit on smaller batches of data >>> transformer.partial_fit(X[:100, :]) IncrementalPCA(batch_size=200, n_components=7) >>> # or let the fit function itself divide the data into batches >>> X_sparse = sparse.csr_matrix(X) >>> X_transformed = transformer.fit_transform(X_sparse) >>> X_transformed.shape (1797, 7) """ _parameter_constraints: dict = { "n_components": [Interval(Integral, 1, None, closed="left"), None], "whiten": ["boolean"], "copy": ["boolean"], "batch_size": [Interval(Integral, 1, None, closed="left"), None], } def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None): self.n_components = n_components self.whiten = whiten self.copy = copy self.batch_size = batch_size def fit(self, X, y=None): """Fit the model with X, using minibatches of size batch_size. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ self._validate_params() self.components_ = None self.n_samples_seen_ = 0 self.mean_ = 0.0 self.var_ = 0.0 self.singular_values_ = None self.explained_variance_ = None self.explained_variance_ratio_ = None self.noise_variance_ = None X = self._validate_data( X, accept_sparse=["csr", "csc", "lil"], copy=self.copy, dtype=[np.float64, np.float32], ) n_samples, n_features = X.shape if self.batch_size is None: self.batch_size_ = 5 * n_features else: self.batch_size_ = self.batch_size for batch in gen_batches( n_samples, self.batch_size_, min_batch_size=self.n_components or 0 ): X_batch = X[batch] if sparse.issparse(X_batch): X_batch = X_batch.toarray() self.partial_fit(X_batch, check_input=False) return self def partial_fit(self, X, y=None, check_input=True): """Incremental fit with X. All of X is processed as a single batch. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. check_input : bool, default=True Run check_array on X. Returns ------- self : object Returns the instance itself. """ first_pass = not hasattr(self, "components_") if first_pass: self._validate_params() if check_input: if sparse.issparse(X): raise TypeError( "IncrementalPCA.partial_fit does not support " "sparse input. Either convert data to dense " "or use IncrementalPCA.fit to do so in batches." ) X = self._validate_data( X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass ) n_samples, n_features = X.shape if first_pass: self.components_ = None if self.n_components is None: if self.components_ is None: self.n_components_ = min(n_samples, n_features) else: self.n_components_ = self.components_.shape[0] elif not self.n_components <= n_features: raise ValueError( "n_components=%r invalid for n_features=%d, need " "more rows than columns for IncrementalPCA " "processing" % (self.n_components, n_features) ) elif not self.n_components <= n_samples: raise ValueError( "n_components=%r must be less or equal to " "the batch number of samples " "%d." 
% (self.n_components, n_samples) ) else: self.n_components_ = self.n_components if (self.components_ is not None) and ( self.components_.shape[0] != self.n_components_ ): raise ValueError( "Number of input features has changed from %i " "to %i between calls to partial_fit! Try " "setting n_components to a fixed value." % (self.components_.shape[0], self.n_components_) ) # This is the first partial_fit if not hasattr(self, "n_samples_seen_"): self.n_samples_seen_ = 0 self.mean_ = 0.0 self.var_ = 0.0 # Update stats - they are 0 if this is the first step col_mean, col_var, n_total_samples = _incremental_mean_and_var( X, last_mean=self.mean_, last_variance=self.var_, last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]), ) n_total_samples = n_total_samples[0] # Whitening if self.n_samples_seen_ == 0: # If it is the first step, simply whiten X X -= col_mean else: col_batch_mean = np.mean(X, axis=0) X -= col_batch_mean # Build matrix of combined previous basis and new data mean_correction = np.sqrt( (self.n_samples_seen_ / n_total_samples) * n_samples ) * (self.mean_ - col_batch_mean) X = np.vstack( ( self.singular_values_.reshape((-1, 1)) * self.components_, X, mean_correction, ) ) U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False) U, Vt = svd_flip(U, Vt, u_based_decision=False) explained_variance = S**2 / (n_total_samples - 1) explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples) self.n_samples_seen_ = n_total_samples self.components_ = Vt[: self.n_components_] self.singular_values_ = S[: self.n_components_] self.mean_ = col_mean self.var_ = col_var self.explained_variance_ = explained_variance[: self.n_components_] self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_] # we already checked `self.n_components <= n_samples` above if self.n_components_ not in (n_samples, n_features): self.noise_variance_ = explained_variance[self.n_components_ :].mean() else: self.noise_variance_ = 0.0 return self def transform(self, X): """Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set, using minibatches of size batch_size if X is sparse. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : ndarray of shape (n_samples, n_components) Projection of X in the first principal components. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import IncrementalPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], ... [1, 1], [2, 1], [3, 2]]) >>> ipca = IncrementalPCA(n_components=2, batch_size=3) >>> ipca.fit(X) IncrementalPCA(batch_size=3, n_components=2) >>> ipca.transform(X) # doctest: +SKIP """ if sparse.issparse(X): n_samples = X.shape[0] output = [] for batch in gen_batches( n_samples, self.batch_size_, min_batch_size=self.n_components or 0 ): output.append(super().transform(X[batch].toarray())) return np.vstack(output) else: return super().transform(X)
bsd-3-clause
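A minimal sketch of the incremental usage pattern the docstring describes, feeding `partial_fit` batch by batch with the public `gen_batches` helper (the batch size of 200 is an arbitrary choice):

```python
from sklearn.datasets import load_digits
from sklearn.decomposition import IncrementalPCA
from sklearn.utils import gen_batches

X, _ = load_digits(return_X_y=True)

ipca = IncrementalPCA(n_components=7)
# Statistics (mean_, var_, components_) accumulate across partial_fit calls,
# so only one batch is ever held in memory.
for batch in gen_batches(X.shape[0], 200):
    ipca.partial_fit(X[batch])

X_reduced = ipca.transform(X)
print(X_reduced.shape)                       # (1797, 7)
print(ipca.n_samples_seen_)                  # 1797
print(ipca.explained_variance_ratio_.sum())  # variance retained by 7 components
```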
iemejia/incubator-beam
sdks/python/apache_beam/examples/fastavro_it_test.py
1
6394
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """End-to-end test for Avro IO's fastavro support. Writes a configurable number of records to a temporary location with each of {avro,fastavro}, then reads them back in, joins the two read datasets, and verifies they have the same elements. Usage: DataFlowRunner: python setup.py nosetests --tests apache_beam.examples.fastavro_it_test \ --test-pipeline-options=" --runner=TestDataflowRunner --project=... --region=... --staging_location=gs://... --temp_location=gs://... --output=gs://... --sdk_location=... " DirectRunner: python setup.py nosetests --tests apache_beam.examples.fastavro_it_test \ --test-pipeline-options=" --output=/tmp --records=5000 " """ # pytype: skip-file from __future__ import absolute_import from __future__ import division import json import logging import unittest import uuid from fastavro import parse_schema from nose.plugins.attrib import attr from apache_beam.io.avroio import ReadAllFromAvro from apache_beam.io.avroio import WriteToAvro from apache_beam.runners.runner import PipelineState from apache_beam.testing.test_pipeline import TestPipeline from apache_beam.testing.test_utils import delete_files from apache_beam.testing.util import BeamAssertException from apache_beam.transforms.core import Create from apache_beam.transforms.core import FlatMap from apache_beam.transforms.core import Map from apache_beam.transforms.util import CoGroupByKey # pylint: disable=wrong-import-order, wrong-import-position try: from avro.schema import Parse # avro-python3 library for python3 except ImportError: from avro.schema import parse as Parse # avro library for python2 # pylint: enable=wrong-import-order, wrong-import-position LABELS = ['abc', 'def', 'ghi', 'jkl', 'mno', 'pqr', 'stu', 'vwx'] COLORS = ['RED', 'ORANGE', 'YELLOW', 'GREEN', 'BLUE', 'PURPLE', None] def record(i): return { 'label': LABELS[i % len(LABELS)], 'number': i, 'number_str': str(i), 'color': COLORS[i % len(COLORS)] } class FastavroIT(unittest.TestCase): SCHEMA_STRING = ''' {"namespace": "example.avro", "type": "record", "name": "User", "fields": [ {"name": "label", "type": "string"}, {"name": "number", "type": ["int", "null"]}, {"name": "number_str", "type": ["string", "null"]}, {"name": "color", "type": ["string", "null"]} ] } ''' def setUp(self): self.test_pipeline = TestPipeline(is_integration_test=True) self.uuid = str(uuid.uuid4()) self.output = '/'.join([self.test_pipeline.get_option('output'), self.uuid]) @attr('IT') def test_avro_it(self): num_records = self.test_pipeline.get_option('records') num_records = int(num_records) if num_records else 1000000 # Seed a `PCollection` with indices that will each be FlatMap'd into # `batch_size` records, to avoid having a too-large list in memory at # the outset batch_size = 
self.test_pipeline.get_option('batch-size') batch_size = int(batch_size) if batch_size else 10000 # pylint: disable=range-builtin-not-iterating batches = range(int(num_records / batch_size)) def batch_indices(start): # pylint: disable=range-builtin-not-iterating return range(start * batch_size, (start + 1) * batch_size) # A `PCollection` with `num_records` avro records records_pcoll = \ self.test_pipeline \ | 'create-batches' >> Create(batches) \ | 'expand-batches' >> FlatMap(batch_indices) \ | 'create-records' >> Map(record) fastavro_output = '/'.join([self.output, 'fastavro']) avro_output = '/'.join([self.output, 'avro']) # pylint: disable=expression-not-assigned records_pcoll \ | 'write_fastavro' >> WriteToAvro( fastavro_output, parse_schema(json.loads(self.SCHEMA_STRING)), use_fastavro=True ) # pylint: disable=expression-not-assigned records_pcoll \ | 'write_avro' >> WriteToAvro( avro_output, Parse(self.SCHEMA_STRING), use_fastavro=False ) result = self.test_pipeline.run() result.wait_until_finish() assert result.state == PipelineState.DONE with TestPipeline(is_integration_test=True) as fastavro_read_pipeline: fastavro_records = \ fastavro_read_pipeline \ | 'create-fastavro' >> Create(['%s*' % fastavro_output]) \ | 'read-fastavro' >> ReadAllFromAvro(use_fastavro=True) \ | Map(lambda rec: (rec['number'], rec)) avro_records = \ fastavro_read_pipeline \ | 'create-avro' >> Create(['%s*' % avro_output]) \ | 'read-avro' >> ReadAllFromAvro(use_fastavro=False) \ | Map(lambda rec: (rec['number'], rec)) def check(elem): v = elem[1] def assertEqual(l, r): if l != r: raise BeamAssertException('Assertion failed: %s == %s' % (l, r)) assertEqual(sorted(v.keys()), ['avro', 'fastavro']) avro_values = v['avro'] fastavro_values = v['fastavro'] assertEqual(avro_values, fastavro_values) assertEqual(len(avro_values), 1) # pylint: disable=expression-not-assigned { 'avro': avro_records, 'fastavro': fastavro_records } \ | CoGroupByKey() \ | Map(check) self.addCleanup(delete_files, [self.output]) assert result.state == PipelineState.DONE if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG) unittest.main()
apache-2.0
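The integration test above needs a Beam runner and cloud resources; the fastavro round-trip itself can be sketched in memory with `fastavro.writer`/`fastavro.reader` against the same schema. The record values below are made up:

```python
import io
import json
from fastavro import parse_schema, reader, writer

SCHEMA_STRING = '''
{"namespace": "example.avro", "type": "record", "name": "User",
 "fields": [
     {"name": "label", "type": "string"},
     {"name": "number", "type": ["int", "null"]},
     {"name": "number_str", "type": ["string", "null"]},
     {"name": "color", "type": ["string", "null"]}
 ]}
'''
schema = parse_schema(json.loads(SCHEMA_STRING))

records = [{'label': 'abc', 'number': i, 'number_str': str(i), 'color': 'RED'}
           for i in range(3)]

# Write an Avro container to a buffer and read it back; the Beam pipeline
# does the same thing at scale against GCS paths.
buf = io.BytesIO()
writer(buf, schema, records)
buf.seek(0)
assert [rec['number'] for rec in reader(buf)] == [0, 1, 2]
```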
shishaochen/TensorFlow-0.8-Win
third_party/grpc/tools/gcp/utils/big_query_utils.py
42
5378
#!/usr/bin/env python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse import json import uuid import httplib2 from apiclient import discovery from apiclient.errors import HttpError from oauth2client.client import GoogleCredentials NUM_RETRIES = 3 def create_big_query(): """Authenticates with cloud platform and gets a BiqQuery service object """ creds = GoogleCredentials.get_application_default() return discovery.build('bigquery', 'v2', credentials=creds) def create_dataset(biq_query, project_id, dataset_id): is_success = True body = { 'datasetReference': { 'projectId': project_id, 'datasetId': dataset_id } } try: dataset_req = biq_query.datasets().insert(projectId=project_id, body=body) dataset_req.execute(num_retries=NUM_RETRIES) except HttpError as http_error: if http_error.resp.status == 409: print 'Warning: The dataset %s already exists' % dataset_id else: # Note: For more debugging info, print "http_error.content" print 'Error in creating dataset: %s. Err: %s' % (dataset_id, http_error) is_success = False return is_success def create_table(big_query, project_id, dataset_id, table_id, table_schema, description): fields = [{'name': field_name, 'type': field_type, 'description': field_description } for (field_name, field_type, field_description) in table_schema] return create_table2(big_query, project_id, dataset_id, table_id, fields, description) def create_table2(big_query, project_id, dataset_id, table_id, fields_schema, description): is_success = True body = { 'description': description, 'schema': { 'fields': fields_schema }, 'tableReference': { 'datasetId': dataset_id, 'projectId': project_id, 'tableId': table_id } } try: table_req = big_query.tables().insert(projectId=project_id, datasetId=dataset_id, body=body) res = table_req.execute(num_retries=NUM_RETRIES) print 'Successfully created %s "%s"' % (res['kind'], res['id']) except HttpError as http_error: if http_error.resp.status == 409: print 'Warning: Table %s already exists' % table_id else: print 'Error in creating table: %s. 
Err: %s' % (table_id, http_error) is_success = False return is_success def insert_rows(big_query, project_id, dataset_id, table_id, rows_list): is_success = True body = {'rows': rows_list} try: insert_req = big_query.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body) res = insert_req.execute(num_retries=NUM_RETRIES) if res.get('insertErrors', None): print 'Error inserting rows! Response: %s' % res is_success = False except HttpError as http_error: print 'Error inserting rows to the table %s' % table_id is_success = False return is_success def sync_query_job(big_query, project_id, query, timeout=5000): query_data = {'query': query, 'timeoutMs': timeout} query_job = None try: query_job = big_query.jobs().query( projectId=project_id, body=query_data).execute(num_retries=NUM_RETRIES) except HttpError as http_error: print 'Query execute job failed with error: %s' % http_error print http_error.content return query_job # List of (column name, column type, description) tuples def make_row(unique_row_id, row_values_dict): """row_values_dict is a dictionary of column name and column value. """ return {'insertId': unique_row_id, 'json': row_values_dict}
apache-2.0
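Calling the helpers above requires Google Cloud credentials. As a credential-free sketch, this is the row/body shape that `make_row` and `insert_rows` hand to `tabledata().insertAll()`; the column names are invented for illustration:

```python
import uuid

def make_row(unique_row_id, row_values_dict):
    # Mirrors the helper above: insertId gives BigQuery a deduplication key.
    return {'insertId': unique_row_id, 'json': row_values_dict}

rows = [
    make_row(uuid.uuid4().hex, {'test_name': 'latency', 'value': 12.5}),
    make_row(uuid.uuid4().hex, {'test_name': 'throughput', 'value': 840.0}),
]

# This is the request body that insert_rows() passes to the API.
body = {'rows': rows}
print(body)
```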
jwlawson/tensorflow
tensorflow/contrib/learn/python/learn/estimators/__init__.py
33
12484
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """An estimator is a rule for calculating an estimate of a given quantity. # Estimators * **Estimators** are used to train and evaluate TensorFlow models. They support regression and classification problems. * **Classifiers** are functions that have discrete outcomes. * **Regressors** are functions that predict continuous values. ## Choosing the correct estimator * For **Regression** problems use one of the following: * `LinearRegressor`: Uses linear model. * `DNNRegressor`: Uses DNN. * `DNNLinearCombinedRegressor`: Uses Wide & Deep. * `TensorForestEstimator`: Uses RandomForest. See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator. * `Estimator`: Use when you need a custom model. * For **Classification** problems use one of the following: * `LinearClassifier`: Multiclass classifier using Linear model. * `DNNClassifier`: Multiclass classifier using DNN. * `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep. * `TensorForestEstimator`: Uses RandomForest. See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator. * `SVM`: Binary classifier using linear SVMs. * `LogisticRegressor`: Use when you need custom model for binary classification. * `Estimator`: Use when you need custom model for N class classification. ## Pre-canned Estimators Pre-canned estimators are machine learning estimators premade for general purpose problems. If you need more customization, you can always write your own custom estimator as described in the section below. Pre-canned estimators are tested and optimized for speed and quality. ### Define the feature columns Here are some possible types of feature columns used as inputs to a pre-canned estimator. Feature columns may vary based on the estimator used. So you can see which feature columns are fed to each estimator in the below section. ```python sparse_feature_a = sparse_column_with_keys( column_name="sparse_feature_a", keys=["AB", "CD", ...]) embedding_feature_a = embedding_column( sparse_id_column=sparse_feature_a, dimension=3, combiner="sum") sparse_feature_b = sparse_column_with_hash_bucket( column_name="sparse_feature_b", hash_bucket_size=1000) embedding_feature_b = embedding_column( sparse_id_column=sparse_feature_b, dimension=16, combiner="sum") crossed_feature_a_x_b = crossed_column( columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000) real_feature = real_valued_column("real_feature") real_feature_buckets = bucketized_column( source_column=real_feature, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) ``` ### Create the pre-canned estimator DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty similar to each other in how you use them. You can easily plug in an optimizer and/or regularization to those estimators. #### DNNClassifier A classifier for TensorFlow DNN models. 
```python my_features = [embedding_feature_a, embedding_feature_b] estimator = DNNClassifier( feature_columns=my_features, hidden_units=[1024, 512, 256], optimizer=tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) ``` #### DNNRegressor A regressor for TensorFlow DNN models. ```python my_features = [embedding_feature_a, embedding_feature_b] estimator = DNNRegressor( feature_columns=my_features, hidden_units=[1024, 512, 256]) # Or estimator using the ProximalAdagradOptimizer optimizer with # regularization. estimator = DNNRegressor( feature_columns=my_features, hidden_units=[1024, 512, 256], optimizer=tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) ``` #### DNNLinearCombinedClassifier A classifier for TensorFlow Linear and DNN joined training models. * Wide and deep model * Multi class (2 by default) ```python my_linear_features = [crossed_feature_a_x_b] my_deep_features = [embedding_feature_a, embedding_feature_b] estimator = DNNLinearCombinedClassifier( # Common settings n_classes=n_classes, weight_column_name=weight_column_name, # Wide settings linear_feature_columns=my_linear_features, linear_optimizer=tf.train.FtrlOptimizer(...), # Deep settings dnn_feature_columns=my_deep_features, dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.AdagradOptimizer(...)) ``` #### LinearClassifier Train a linear model to classify instances into one of multiple possible classes. When number of possible classes is 2, this is binary classification. ```python my_features = [sparse_feature_b, crossed_feature_a_x_b] estimator = LinearClassifier( feature_columns=my_features, optimizer=tf.train.FtrlOptimizer( learning_rate=0.1, l1_regularization_strength=0.001 )) ``` #### LinearRegressor Train a linear regression model to predict a label value given observation of feature values. ```python my_features = [sparse_feature_b, crossed_feature_a_x_b] estimator = LinearRegressor( feature_columns=my_features) ``` ### LogisticRegressor Logistic regression estimator for binary classification. ```python # See tf.contrib.learn.Estimator(...) for details on model_fn structure def my_model_fn(...): pass estimator = LogisticRegressor(model_fn=my_model_fn) # Input builders def input_fn_train: pass estimator.fit(input_fn=input_fn_train) estimator.predict(x=x) ``` #### SVM - Support Vector Machine Support Vector Machine (SVM) model for binary classification. Currently only linear SVMs are supported. ```python my_features = [real_feature, sparse_feature_a] estimator = SVM( example_id_column='example_id', feature_columns=my_features, l2_regularization=10.0) ``` #### DynamicRnnEstimator An `Estimator` that uses a recurrent neural network with dynamic unrolling. ```python problem_type = ProblemType.CLASSIFICATION # or REGRESSION prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE estimator = DynamicRnnEstimator(problem_type, prediction_type, my_feature_columns) ``` ### Use the estimator There are two main functions for using estimators, one of which is for training, and one of which is for evaluation. You can specify different data sources for each one in order to use different datasets for train and eval. ```python # Input builders def input_fn_train: # returns x, Y ... estimator.fit(input_fn=input_fn_train) def input_fn_eval: # returns x, Y ... 
estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` ## Creating Custom Estimator To create a custom `Estimator`, provide a function to `Estimator`'s constructor that builds your model (`model_fn`, below): ```python estimator = tf.contrib.learn.Estimator( model_fn=model_fn, model_dir=model_dir) # Where the model's data (e.g., checkpoints) # are saved. ``` Here is a skeleton of this function, with descriptions of its arguments and return values in the accompanying tables: ```python def model_fn(features, targets, mode, params): # Logic to do the following: # 1. Configure the model via TensorFlow operations # 2. Define the loss function for training/evaluation # 3. Define the training operation/optimizer # 4. Generate predictions return predictions, loss, train_op ``` You may use `mode` and check against `tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`. In the Further Reading section below, there is an end-to-end TensorFlow tutorial for building a custom estimator. ## Additional Estimators There is an additional estimators under `tensorflow.contrib.factorization.python.ops`: * Gaussian mixture model (GMM) clustering ## Further reading For further reading, there are several tutorials with relevant topics, including: * [Overview of linear models](../../../tutorials/linear/overview.md) * [Linear model tutorial](../../../tutorials/wide/index.md) * [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md) * [Custom estimator tutorial](../../../tutorials/estimators/index.md) * [Building input functions](../../../tutorials/input_fn/index.md) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head from tensorflow.contrib.learn.python.learn.estimators.head import Head from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head from tensorflow.contrib.learn.python.learn.estimators.head import multi_head from 
tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head from tensorflow.contrib.learn.python.learn.estimators.head import regression_head from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
apache-2.0
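A minimal sketch of the pre-canned estimator workflow this docstring describes, assuming the deprecated TensorFlow 1.x `tf.contrib.learn` API (it will not run on TensorFlow 2.x); the random arrays stand in for a real dataset:

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(120, 4).astype(np.float32)
y = np.random.randint(0, 3, size=120)

# Build numeric feature columns directly from the input array.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
classifier = tf.contrib.learn.DNNClassifier(
    feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

# SKCompat wraps the estimator with a scikit-learn style fit/predict
# interface over in-memory arrays.
est = tf.contrib.learn.SKCompat(classifier)
est.fit(x=x, y=y, steps=100)
predictions = est.predict(x=x)  # structure depends on the estimator head
```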
HighEnergyDataScientests/bnpcompetition
scripts/BNP_main.py
1
1995
#!/usr/bin/python ################################################################################################################### ### This code is developed by DataTistics team on kaggle ### Do not copy or modify without written approval from one of the team members. ################################################################################################################### # ############################### This is BNP_main script which calls to functions from modules ##################### import time import pandas as pd # Custom modules #from datapreprocessing import data_processing from data_preproc_XGBoost import data_proc_XGBoost from two_layer_training import First_layer_classifiers, Second_layer_ensembling # load the datasets print("## Loading Data") models_predictions_file = "../predictions/models_predictions.csv" train = pd.read_csv('../inputs/train.csv') test = pd.read_csv('../inputs/test.csv') ### Controlling Parameters output_col_name = "target" test_col_name = "PredictedProb" id_col_name = "ID" test_size_test = 0.2 test_size_val = test_size_test/(1.0 - test_size_test) ## Running data preprocessing #X, X_train, X_valid, X_test, y, y_train, y_valid, y_test, temp_test = data_processing(models_predictions_file,train,test, output_col_name,test_col_name,id_col_name,test_size_test,test_size_val) X, X_train, X_valid, X_test, y, y_train, y_valid, y_test, temp_test = data_proc_XGBoost(models_predictions_file,train,test, output_col_name,test_col_name,id_col_name,test_size_test,test_size_val) p_valid, p_test, p_ttest_t = First_layer_classifiers(X, X_train, X_valid, X_test, y, y_train, y_valid, y_test, temp_test) yt_out = Second_layer_ensembling(p_valid, p_test, y_valid, y_test, p_ttest_t) # Producing output of predicted probabilities and writing it into a file timestamp = time.strftime("%Y%m%d-%H%M%S") test[test_col_name] = yt_out[:,1] test[[id_col_name,test_col_name]].to_csv("../predictions/2layer_yt_out" + timestamp + ".csv", index=False)
apache-2.0
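The project's preprocessing and two-layer-training modules are not included in this record, so the script cannot be run as-is. The sketch below shows the same two-layer (stacking) idea using scikit-learn only, on synthetic data; it is not the team's actual pipeline:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=0.4, random_state=0)
X_valid, X_test, y_valid, y_test = train_test_split(X_hold, y_hold, test_size=0.5, random_state=0)

# First layer: fit base models on the training split, collect their
# predicted probabilities on the validation and test splits.
base_models = [RandomForestClassifier(n_estimators=100, random_state=0),
               GradientBoostingClassifier(random_state=0)]
p_valid = np.column_stack([m.fit(X_train, y_train).predict_proba(X_valid)[:, 1]
                           for m in base_models])
p_test = np.column_stack([m.predict_proba(X_test)[:, 1] for m in base_models])

# Second layer: a meta-model trained on the first layer's validation predictions.
meta = LogisticRegression().fit(p_valid, y_valid)
print('stacked accuracy: %.3f' % meta.score(p_test, y_test))
```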
chanceraine/nupic
examples/opf/experiments/multistep/base/permutations_simple_3.py
35
4155
#! /usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by ExpGenerator to generate the actual permutations.py file by replacing $XXXXXXXX tokens with desired values. This permutations.py file was generated by: '~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc' """ import os from pkg_resources import resource_filename from nupic.swarming.permutationhelpers import * # The name of the field being predicted. Any allowed permutation MUST contain # the prediction field. # (generated from PREDICTION_FIELD) predictedField = 'field2' relativePath = os.path.join("examples", "opf", "experiments", "multistep", "datasets", "simple_3.csv") permutations = { # Encoder permutation choices # Example: # # '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7, # n=100), # # '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder', # w=7, n=100), # # '__timestamp_timeOfDay_encoder' : PermuteEncoder('timestamp', # 'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])), # # '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp', # 'DateEncoder.dayOfWeek', w=7, radius=PermuteChoices([1, 3])), # # '__consumption_encoder' : PermuteEncoder('consumption', 'ScalarEncoder', # w=7, n=PermuteInt(13, 500, 20), minval=0, # maxval=PermuteInt(100, 300, 25)), # # (generated from PERM_ENCODER_CHOICES) 'predictedField': 'field2', 'predictionSteps': [1,3], 'dataSource': 'file://%s' % (resource_filename("nupic", relativePath)), '__field2_encoder' : PermuteEncoder(fieldName='field2', clipInput=True, minval = 0, maxval=50, encoderClass='ScalarEncoder', w=21, n=PermuteChoices([500])), } # Fields selected for final hypersearch report; # NOTE: These values are used as regular expressions by RunPermutations.py's # report generator # (fieldname values generated from PERM_PREDICTED_FIELD_NAME) report = [ '.*field2.*', ] # Permutation optimization setting: either minimize or maximize metric # used by RunPermutations. # NOTE: The value is used as a regular expression by RunPermutations.py's # report generator # (generated from minimize = 'prediction:aae:window=1000:field=consumption') minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2" def permutationFilter(perm): """ This function can be used to selectively filter out specific permutation combinations. It is called by RunPermutations for every possible permutation of the variables in the permutations dict. It should return True for a valid combination of permutation values and False for an invalid one. 
Parameters: --------------------------------------------------------- perm: dict of one possible combination of name:value pairs chosen from permutations. """ # An example of how to use this #if perm['__consumption_encoder']['maxval'] > 300: # return False; return True
agpl-3.0
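Running the template above requires the NuPIC swarming framework. The plain-Python sketch below, with made-up candidate values and no NuPIC dependency, only illustrates the role of `permutationFilter`: pruning candidate permutations before they are evaluated:

```python
from itertools import product

# Hypothetical candidate values for one encoder's parameters.
candidate_values = {
    'maxval': [50, 100, 300, 500],
    'n': [500],
}

def permutation_filter(perm):
    # Reject encoders with an overly wide value range, as in the commented example.
    return perm['maxval'] <= 300

grid = [dict(zip(candidate_values, values))
        for values in product(*candidate_values.values())]
kept = [perm for perm in grid if permutation_filter(perm)]
print(kept)  # the maxval=500 candidate is filtered out
```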
procoder317/scikit-learn
sklearn/preprocessing/__init__.py
265
1319
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', ]
bsd-3-clause
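A short sketch, on toy data, of the class-style and function-style transformers this package exposes:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler, binarize

X = np.array([[1.0, -2.0], [2.0, 0.0], [3.0, 2.0]])

# Class API: fit on training data, then reuse the learned statistics.
scaler = StandardScaler().fit(X)
print(scaler.transform(X).mean(axis=0))  # ~[0, 0] after centering/scaling
print(MinMaxScaler().fit_transform(X))   # each column rescaled to [0, 1]

# Function API: one-off transforms without keeping any state.
print(binarize(X, threshold=0.5))
```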
procoder317/scikit-learn
benchmarks/bench_glm.py
295
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
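A similar single-size timing can be sketched with `time.perf_counter` instead of the private `sklearn.utils.bench.total_seconds` helper used above:

```python
import time
import numpy as np
from sklearn import linear_model

n_samples, n_features = 500, 500
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)

# Time one fit per model on a single random square problem.
for name, model in [('Ridge', linear_model.Ridge(alpha=1.0)),
                    ('OLS', linear_model.LinearRegression()),
                    ('LassoLars', linear_model.LassoLars())]:
    start = time.perf_counter()
    model.fit(X, Y)
    print('%s: %.4f s' % (name, time.perf_counter() - start))
```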
jwlawson/tensorflow
tensorflow/contrib/eager/python/examples/mnist/mnist.py
2
9218
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A deep MNIST classifier using convolutional layers. Sample usage: python mnist.py --help """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import functools import os import sys import time import tensorflow as tf import tensorflow.contrib.eager as tfe from tensorflow.examples.tutorials.mnist import input_data FLAGS = None class MNISTModel(tfe.Network): """MNIST Network. Network structure is equivalent to: https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py and https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py But written using the tf.layers API. """ def __init__(self, data_format): """Creates a model for classifying a hand-written digit. Args: data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is typically faster on GPUs while 'channels_last' is typically faster on CPUs. See https://www.tensorflow.org/performance/performance_guide#data_formats """ super(MNISTModel, self).__init__(name='') if data_format == 'channels_first': self._input_shape = [-1, 1, 28, 28] else: assert data_format == 'channels_last' self._input_shape = [-1, 28, 28, 1] self.conv1 = self.track_layer( tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu)) self.conv2 = self.track_layer( tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu)) self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu)) self.fc2 = self.track_layer(tf.layers.Dense(10)) self.dropout = self.track_layer(tf.layers.Dropout(0.5)) self.max_pool2d = self.track_layer( tf.layers.MaxPooling2D( (2, 2), (2, 2), padding='SAME', data_format=data_format)) def call(self, inputs, training): """Computes labels from inputs. Users should invoke __call__ to run the network, which delegates to this method (and not call this method directly). Args: inputs: A batch of images as a Tensor with shape [batch_size, 784]. training: True if invoked in the context of training (causing dropout to be applied). False otherwise. Returns: A Tensor with shape [batch_size, 10] containing the predicted logits for each image in the batch, for each of the 10 classes. 
""" x = tf.reshape(inputs, self._input_shape) x = self.conv1(x) x = self.max_pool2d(x) x = self.conv2(x) x = self.max_pool2d(x) x = tf.layers.flatten(x) x = self.fc1(x) if training: x = self.dropout(x) x = self.fc2(x) return x def loss(predictions, labels): return tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits=predictions, labels=labels)) def compute_accuracy(predictions, labels): return tf.reduce_sum( tf.cast( tf.equal( tf.argmax(predictions, axis=1, output_type=tf.int64), tf.argmax(labels, axis=1, output_type=tf.int64)), dtype=tf.float32)) / float(predictions.shape[0].value) def train_one_epoch(model, optimizer, dataset, log_interval=None): """Trains model on `dataset` using `optimizer`.""" tf.train.get_or_create_global_step() def model_loss(labels, images): prediction = model(images, training=True) loss_value = loss(prediction, labels) tf.contrib.summary.scalar('loss', loss_value) tf.contrib.summary.scalar('accuracy', compute_accuracy(prediction, labels)) return loss_value for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)): with tf.contrib.summary.record_summaries_every_n_global_steps(10): batch_model_loss = functools.partial(model_loss, labels, images) optimizer.minimize( batch_model_loss, global_step=tf.train.get_global_step()) if log_interval and batch % log_interval == 0: print('Batch #%d\tLoss: %.6f' % (batch, batch_model_loss())) def test(model, dataset): """Perform an evaluation of `model` on the examples from `dataset`.""" avg_loss = tfe.metrics.Mean('loss') accuracy = tfe.metrics.Accuracy('accuracy') for (images, labels) in tfe.Iterator(dataset): predictions = model(images, training=False) avg_loss(loss(predictions, labels)) accuracy(tf.argmax(predictions, axis=1, output_type=tf.int64), tf.argmax(labels, axis=1, output_type=tf.int64)) print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' % (avg_loss.result(), 100 * accuracy.result())) with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar('loss', avg_loss.result()) tf.contrib.summary.scalar('accuracy', accuracy.result()) def load_data(data_dir): """Returns training and test tf.data.Dataset objects.""" data = input_data.read_data_sets(data_dir, one_hot=True) train_ds = tf.data.Dataset.from_tensor_slices((data.train.images, data.train.labels)) test_ds = tf.data.Dataset.from_tensors((data.test.images, data.test.labels)) return (train_ds, test_ds) def main(_): tfe.enable_eager_execution() (device, data_format) = ('/gpu:0', 'channels_first') if FLAGS.no_gpu or tfe.num_gpus() <= 0: (device, data_format) = ('/cpu:0', 'channels_last') print('Using device %s, and data format %s.' 
% (device, data_format)) # Load the datasets (train_ds, test_ds) = load_data(FLAGS.data_dir) train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size) # Create the model and optimizer model = MNISTModel(data_format) optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum) if FLAGS.output_dir: train_dir = os.path.join(FLAGS.output_dir, 'train') test_dir = os.path.join(FLAGS.output_dir, 'eval') tf.gfile.MakeDirs(FLAGS.output_dir) else: train_dir = None test_dir = None summary_writer = tf.contrib.summary.create_file_writer( train_dir, flush_millis=10000) test_summary_writer = tf.contrib.summary.create_file_writer( test_dir, flush_millis=10000, name='test') checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt') with tf.device(device): for epoch in range(1, 11): with tfe.restore_variables_on_create( tf.train.latest_checkpoint(FLAGS.checkpoint_dir)): global_step = tf.train.get_or_create_global_step() start = time.time() with summary_writer.as_default(): train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval) end = time.time() print('\nTrain time for epoch #%d (global step %d): %f' % ( epoch, global_step.numpy(), end - start)) with test_summary_writer.as_default(): test(model, test_ds) all_variables = ( model.variables + optimizer.variables() + [global_step]) tfe.Saver(all_variables).save( checkpoint_prefix, global_step=global_step) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--data-dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') parser.add_argument( '--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument( '--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') parser.add_argument( '--output_dir', type=str, default=None, metavar='N', help='Directory to write TensorBoard summaries') parser.add_argument( '--checkpoint_dir', type=str, default='/tmp/tensorflow/mnist/checkpoints/', metavar='N', help='Directory to save checkpoints in (once per epoch)') parser.add_argument( '--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument( '--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') parser.add_argument( '--no-gpu', action='store_true', default=False, help='disables GPU usage even if a GPU is available') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
explosion/thinc
thinc/shims/pytorch.py
1
7526
from typing import Any, Optional, cast import contextlib from io import BytesIO import itertools import srsly from ..util import torch2xp, xp2torch, convert_recursive, iterate_recursive from ..util import get_torch_default_device from ..compat import torch from ..backends import get_current_ops, context_pools, CupyOps from ..backends import set_gpu_allocator from ..optimizers import Optimizer from ..types import ArgsKwargs, FloatsXd from .pytorch_grad_scaler import PyTorchGradScaler from .shim import Shim class PyTorchShim(Shim): """Interface between a PyTorch model and a Thinc Model. This container is *not* a Thinc Model subclass itself. mixed_precision: Enable mixed-precision. This changes whitelisted ops to run in half precision for better performance and lower memory use. grad_scaler: The gradient scaler to use for mixed-precision training. If this argument is set to "None" and mixed precision is enabled, a gradient scaler with the default configuration is used. device: The PyTorch device to run the model on. When this argument is set to "None", the default device for the currently active Thinc ops is used. """ def __init__( self, model: Any, config=None, optimizer: Any = None, mixed_precision: bool = False, grad_scaler: Optional[PyTorchGradScaler] = None, device: Optional["torch.device"] = None, ): super().__init__(model, config, optimizer) if device is None: device = get_torch_default_device() if model is not None: model.to(device) if grad_scaler is None: grad_scaler = PyTorchGradScaler(mixed_precision) grad_scaler.to_(device) self._grad_scaler = grad_scaler self._mixed_precision = mixed_precision if CupyOps.xp is not None and isinstance(get_current_ops(), CupyOps): pools = context_pools.get() if "pytorch" not in pools: from cupy import get_default_memory_pool set_gpu_allocator("pytorch") get_default_memory_pool().free_all_blocks() def __call__(self, inputs, is_train): if is_train: return self.begin_update(inputs) else: return self.predict(inputs), lambda a: ... @property def device(self): p = next(self._model.parameters(), None) if p is None: return get_torch_default_device() else: return p.device def predict(self, inputs: ArgsKwargs) -> Any: """Pass inputs through to the underlying PyTorch model, and return the output. No conversions are performed. The PyTorch model is set into evaluation mode. """ self._model.eval() with torch.no_grad(): with torch.cuda.amp.autocast(self._mixed_precision): outputs = self._model(*inputs.args, **inputs.kwargs) self._model.train() return outputs def begin_update(self, inputs: ArgsKwargs): """Pass the inputs through to the underlying PyTorch model, keeping track of which items in the input are tensors requiring gradients. If the model returns a single value, it is converted into a one-element tuple. Return the outputs and a callback to backpropagate. """ self._model.train() # Note: mixed-precision autocast must not be applied to backprop. with torch.cuda.amp.autocast(self._mixed_precision): output = self._model(*inputs.args, **inputs.kwargs) def backprop(grads): # Normally, gradient scaling is applied to the loss of a model. However, # since regular thinc layers do not use mixed-precision, we perform scaling # locally in this shim. Scaling the loss by a factor, scales the gradients # by the same factor (see the chain rule). Therefore, we scale the gradients # backprop'ed through the succeeding layer to get the same effect as loss # scaling. 
grads.kwargs["grad_tensors"] = self._grad_scaler.scale( grads.kwargs["grad_tensors"], inplace=True ) torch.autograd.backward(*grads.args, **grads.kwargs) # Unscale weights and check for overflows during backprop. grad_tensors = [] for torch_data in itertools.chain( self._model.parameters(), iterate_recursive(lambda x: hasattr(x, "grad"), inputs), ): if torch_data.grad is not None: grad_tensors.append(torch_data.grad) found_inf = self._grad_scaler.unscale(grad_tensors) # If there was an over/underflow, return zeroed-out gradients. if found_inf: grad_get = lambda x: x.grad.zero_() if x.grad is not None else x.grad else: grad_get = lambda x: x.grad return convert_recursive(lambda x: hasattr(x, "grad"), grad_get, inputs) return output, backprop def finish_update(self, optimizer: Optimizer): for name, torch_data in self._model.named_parameters(): if torch_data.grad is not None: if ( not self._grad_scaler.found_inf ): # Skip weight update if any gradient overflowed. param, grad = optimizer( (self.id, name), cast(FloatsXd, torch2xp(torch_data.data)), cast(FloatsXd, torch2xp(torch_data.grad)), ) torch_data.data = xp2torch( param, requires_grad=True, device=torch_data.device ) torch_data.grad.zero_() self._grad_scaler.update() @contextlib.contextmanager def use_params(self, params): key_prefix = f"pytorch_{self.id}_" state_dict = {} for k, v in params.items(): if hasattr(k, "startswith") and k.startswith(key_prefix): state_dict[k.replace(key_prefix, "")] = xp2torch(v, device=self.device) if state_dict: backup = {k: v.clone() for k, v in self._model.state_dict().items()} self._model.load_state_dict(state_dict) yield self._model.load_state_dict(backup) else: yield def to_device(self, device_type: str, device_id: int): # pragma: no cover if device_type == "cpu": self._model.cpu() elif device_type == "gpu": self._model.cuda(device_id) else: msg = f"Invalid device_type: {device_type}. Try 'cpu' or 'gpu'" raise ValueError(msg) def to_bytes(self): filelike = BytesIO() torch.save(self._model.state_dict(), filelike) filelike.seek(0) weights_bytes = filelike.getvalue() msg = {"config": self.cfg, "state": weights_bytes} return srsly.msgpack_dumps(msg) def from_bytes(self, bytes_data): device = get_torch_default_device() msg = srsly.msgpack_loads(bytes_data) self.cfg = msg["config"] filelike = BytesIO(msg["state"]) filelike.seek(0) self._model.load_state_dict(torch.load(filelike, map_location=device)) self._model.to(device) self._grad_scaler.to_(device) return self
mit
SU-ECE-17-7/hotspotter
_scripts/_net/logistic_sgd.py
4
15143
""" This tutorial introduces logistic regression using Theano and stochastic gradient descent. Logistic regression is a probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. Mathematically, this can be written as: .. math:: P(Y=i|x, W,b) &= softmax_i(W x + b) \\ &= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}} The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x). .. math:: y_{pred} = argmax_i P(Y=i|x,W,b) This tutorial presents a stochastic gradient descent optimization method suitable for large datasets, and a conjugate gradient optimization method that is suitable for smaller datasets. References: - textbooks: "Pattern Recognition and Machine Learning" - Christopher M. Bishop, section 4.3.2 """ __docformat__ = 'restructedtext en' import cPickle import gzip import os import sys import time import numpy import theano import theano.tensor as T class LogisticRegression(object): """Multi-class Logistic Regression Class The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. """ def __init__(self, input, n_in, n_out): """ Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) # initialize the baises b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # compute vector of class-membership probabilities in symbolic form self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) # compute prediction as class whose probability is maximal in # symbolic form self.y_pred = T.argmax(self.p_y_given_x, axis=1) # parameters of the model self.params = [self.W, self.b] def negative_log_likelihood(self, y): """Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) = \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\ \ell (\theta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size """ # y.shape[0] is (symbolically) the number of rows in y, i.e., # number of examples (call it n) in the minibatch # T.arange(y.shape[0]) is a symbolic vector which will contain # [0,1,2,... 
n-1] T.log(self.p_y_given_x) is a matrix of # Log-Probabilities (call it LP) with one row per example and # one column per class LP[T.arange(y.shape[0]),y] is a vector # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is # the mean (across minibatch examples) of the elements in v, # i.e., the mean log-likelihood across the minibatch. return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) def errors(self, y): """Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label """ # check if y has same dimension of y_pred if y.ndim != self.y_pred.ndim: raise TypeError('y should have the same shape as self.y_pred', ('y', target.type, 'y_pred', self.y_pred.type)) # check if y is of the correct datatype if y.dtype.startswith('int'): # the T.neq operator returns a vector of 0s and 1s, where 1 # represents a mistake in prediction return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError() def load_data(dataset): ''' Loads the dataset :type dataset: string :param dataset: the path to the dataset (here MNIST) ''' ############# # LOAD DATA # ############# # Download the MNIST dataset if it is not present data_dir, data_file = os.path.split(dataset) if data_dir == "" and not os.path.isfile(dataset): # Check if dataset is in the data directory. new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset) if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz': dataset = new_path if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz': import urllib origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz' print 'Downloading data from %s' % origin urllib.urlretrieve(origin, dataset) print '... loading data' # Load the dataset f = gzip.open(dataset, 'rb') train_set, valid_set, test_set = cPickle.load(f) f.close() #train_set, valid_set, test_set format: tuple(input, target) #input is an numpy.ndarray of 2 dimensions (a matrix) #witch row's correspond to an example. target is a #numpy.ndarray of 1 dimensions (vector)) that have the same length as #the number of rows in the input. It should give the target #target to the example with the same index in the input. def shared_dataset(data_xy, borrow=True): """ Function that loads the dataset into shared variables The reason we store our dataset in shared variables is to allow Theano to copy it into the GPU memory (when code is run on GPU). Since copying data into the GPU is slow, copying a minibatch everytime is needed (the default behaviour if the data is not in a shared variable) would lead to a large decrease in performance. """ data_x, data_y = data_xy shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow) shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow) # When storing data on the GPU it has to be stored as floats # therefore we will store the labels as ``floatX`` as well # (``shared_y`` does exactly that). But during our computations # we need them as ints (we use labels as index, and if they are # floats it doesn't make sense) therefore instead of returning # ``shared_y`` we will have to cast it to int. 
This little hack # lets ous get around this issue return shared_x, T.cast(shared_y, 'int32') test_set_x, test_set_y = shared_dataset(test_set) valid_set_x, valid_set_y = shared_dataset(valid_set) train_set_x, train_set_y = shared_dataset(train_set) rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)] return rval def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000, dataset='mnist.pkl.gz', batch_size=600): """ Demonstrate stochastic gradient descent optimization of a log-linear model This is demonstrated on MNIST. :type learning_rate: float :param learning_rate: learning rate used (factor for the stochastic gradient) :type n_epochs: int :param n_epochs: maximal number of epochs to run the optimizer :type dataset: string :param dataset: the path of the MNIST dataset file from http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz """ datasets = load_data(dataset) train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x, test_set_y = datasets[2] # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size ###################### # BUILD ACTUAL MODEL # ###################### print '... building the model' # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch x = T.matrix('x') # the data is presented as rasterized images y = T.ivector('y') # the labels are presented as 1D vector of # [int] labels # construct the logistic regression class # Each MNIST image has size 28*28 classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10) # the cost we minimize during training is the negative log likelihood of # the model in symbolic format cost = classifier.negative_log_likelihood(y) # compiling a Theano function that computes the mistakes that are made by # the model on a minibatch test_model = theano.function(inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * batch_size: (index + 1) * batch_size], y: test_set_y[index * batch_size: (index + 1) * batch_size]}) validate_model = theano.function(inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size:(index + 1) * batch_size], y: valid_set_y[index * batch_size:(index + 1) * batch_size]}) # compute the gradient of cost with respect to theta = (W,b) g_W = T.grad(cost=cost, wrt=classifier.W) g_b = T.grad(cost=cost, wrt=classifier.b) # specify how to update the parameters of the model as a list of # (variable, update expression) pairs. updates = [(classifier.W, classifier.W - learning_rate * g_W), (classifier.b, classifier.b - learning_rate * g_b)] # compiling a Theano function `train_model` that returns the cost, but in # the same time updates the parameter of the model based on the rules # defined in `updates` train_model = theano.function(inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * batch_size:(index + 1) * batch_size], y: train_set_y[index * batch_size:(index + 1) * batch_size]}) ############### # TRAIN MODEL # ############### print '... 
training the model' # early-stopping parameters patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_params = None best_validation_loss = numpy.inf test_score = 0. start_time = time.clock() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % \ (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # test it on the test set test_losses = [test_model(i) for i in xrange(n_test_batches)] test_score = numpy.mean(test_losses) print((' epoch %i, minibatch %i/%i, test error of best' ' model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) if patience <= iter: done_looping = True break end_time = time.clock() print(('Optimization complete with best validation score of %f %%,' 'with test performance %f %%') % (best_validation_loss * 100., test_score * 100.)) print 'The code run for %d epochs, with %f epochs/sec' % ( epoch, 1. * epoch / (end_time - start_time)) print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time))) if __name__ == '__main__': sgd_optimization_mnist()
apache-2.0
vinayak-mehta/scikit-learn
examples/feature_selection/plot_feature_selection_pipeline.py
8
2757
""" ================== Pipeline ANOVA SVM ================== This example shows how a feature selection can be easily integrated within a machine learning pipeline. We also show that you can easily inspect part of the pipeline. """ # %% # We will start by generating a binary classification dataset. Subsequently, we # will divide the dataset into two subsets. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification( n_features=20, n_informative=3, n_redundant=0, n_classes=2, n_clusters_per_class=2, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # %% # A common mistake done with feature selection is to search a subset of # discriminative features on the full dataset, instead of only using the # training set. The usage of scikit-learn :func:`~sklearn.pipeline.Pipeline` # prevents to make such mistake. # # Here, we will demonstrate how to build a pipeline where the first step will # be the feature selection. # # When calling `fit` on the training data, a subset of feature will be selected # and the index of these selected features will be stored. The feature selector # will subsequently reduce the number of features, and pass this subset to the # classifier which will be trained. from sklearn.feature_selection import SelectKBest, f_classif from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC anova_filter = SelectKBest(f_classif, k=3) clf = LinearSVC() anova_svm = make_pipeline(anova_filter, clf) anova_svm.fit(X_train, y_train) # %% # Once the training is complete, we can predict on new unseen samples. In this # case, the feature selector will only select the most discriminative features # based on the information stored during training. Then, the data will be # passed to the classifier which will make the prediction. # # Here, we show the final metrics via a classification report. from sklearn.metrics import classification_report y_pred = anova_svm.predict(X_test) print(classification_report(y_test, y_pred)) # %% # Be aware that you can inspect a step in the pipeline. For instance, we might # be interested about the parameters of the classifier. Since we selected # three features, we expect to have three coefficients. anova_svm[-1].coef_ # %% # However, we do not know which features were selected from the original # dataset. We could proceed by several manners. Here, we will invert the # transformation of these coefficients to get information about the original # space. anova_svm[:-1].inverse_transform(anova_svm[-1].coef_) # %% # We can see that the features with non-zero coefficients are the selected # features by the first step.
bsd-3-clause
alexfields/ORF-RATER
regress_orfs.py
1
30299
#! /usr/bin/env python import argparse import os import pysam import pandas as pd import numpy as np from scipy.optimize import nnls import scipy.sparse import multiprocessing as mp from hashed_read_genome_array import HashedReadBAMGenomeArray, ReadKeyMapFactory, read_length_nmis, get_hashed_counts from plastid.genomics.roitools import SegmentChain, positionlist_to_segments import sys from time import strftime parser = argparse.ArgumentParser(description='Use linear regression to identify likely sites of translation. Regression will be performed for ORFs ' 'defined by find_orfs_and_types.py using a metagene profile constructed from annotated CDSs. If ' 'multiple ribosome profiling datasets are to be analyzed separately (e.g. if they were collected under ' 'different drug treatments), then this program should be run separately for each, ideally in separate ' 'subfolders indicated by SUBDIR.') parser.add_argument('bamfiles', nargs='+', help='Path to transcriptome-aligned BAM file(s) for read data') parser.add_argument('--subdir', default=os.path.curdir, help='Convenience argument when dealing with multiple datasets. In such a case, set SUBDIR to an appropriate name (e.g. HARR, ' 'CHX) to avoid file conflicts. (Default: current directory)') parser.add_argument('--restrictbystarts', nargs='+', help='Subdirectory/subdirectories or filename(s) containing regression output to use to restrict ORFs for regression. If a ' 'directory or list of directories, file(s) of name REGRESSFILE (regression.h5 by default) will be searched for within them. ' 'For use to restrict regression on e.g. CHX or no-drug data based only on positive hits from e.g. HARR or LTM data. ' 'Value(s) of MINWSTART indicate the minimum W statistic to require. If multiple directories/files are provided, start ' 'sites will be taken from their union.') parser.add_argument('--minwstart', type=float, nargs='+', default=[0], help='Minimum W_start statistic to require for regression output in RESTRICTBYSTARTS. If only one value is given, it will be ' 'assumed to apply to all; if multiple values are given, the number of values must match the number of values provided for ' 'RESTRICTBYSTARTS. Ignored if RESTRICTBYSTARTS not included. (Default: 0)') parser.add_argument('--orfstore', default='orf.h5', help='Path to pandas HDF store containing ORFs to regress; generated by find_orfs_and_types.py (Default: orf.h5)') parser.add_argument('--inbed', default='transcripts.bed', help='Transcriptome BED-file (Default: transcripts.bed)') parser.add_argument('--offsetfile', default='offsets.txt', help='Path to 2-column tab-delimited file with 5\' offsets for variable P-site mappings. First column indicates read length, ' 'second column indicates offset to apply. Read lengths are calculated after trimming up to MAX5MIS 5\' mismatches. Accepted ' 'read lengths are defined by those present in the first column of this file. If SUBDIR is set, this file is assumed to be ' 'in that directory. (Default: offsets.txt)') parser.add_argument('--max5mis', type=int, default=1, help='Maximum 5\' mismatches to trim. Reads with more than this number will be excluded.' '(Default: 1)') parser.add_argument('--regressfile', default='regression.h5', help='Filename to which to output the table of regression scores for each ORF. Formatted as pandas HDF (tables generated include ' '"start_strengths", "orf_strengths", and "stop_strengths"). If SUBDIR is set, this file will be placed in that directory. 
' '(Default: regression.h5)') parser.add_argument('--startonly', action='store_true', help='Toggle for datasets collected in the presence of initiation inhibitor (e.g. HARR, ' 'LTM). If selected, "stop_strengths" will not be calculated or saved.') parser.add_argument('--startrange', type=int, nargs=2, default=[1, 50], help='Region around start codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 1 50, meaning ' 'one full codon before the start is modeled, as are the start codon and the 49 codons following it).') parser.add_argument('--stoprange', type=int, nargs=2, default=[7, 0], help='Region around stop codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 7 0, meaning ' 'seven full codons before and including the stop are modeled, but none after).') parser.add_argument('--mincdsreads', type=int, default=64, help='Minimum number of reads required within the body of the CDS (and any surrounding nucleotides indicated by STARTRANGE or ' 'STOPRANGE) for it to be included in the metagene. Ignored if reading metagene from file (Default: 64).') parser.add_argument('--startcount', type=int, default=0, help='Minimum reads at putative translation initiation codon. Useful to reduce computational burden by only considering ORFs ' 'with e.g. at least 1 read at the start. (Default: 0)') parser.add_argument('--metagenefile', default='metagene.txt', help='File to save metagene profile, OR if the file already exists, it will be used as the input metagene. Formatted as ' 'tab-delimited text, with position, readlength, value, and type ("START", "CDS", or "STOP"). If SUBDIR is set, this file ' 'will be placed in that directory. (Default: metagene.txt)') parser.add_argument('--noregress', action='store_true', help='Only generate a metagene (i.e. do not perform any regressions)') parser.add_argument('--exclude', nargs='+', help='Names of transcript families (tfams) to exclude from analysis due to excessive computational time ' 'or memory footprint (e.g. TTN can be so large that the regression never finishes).') parser.add_argument('-v', '--verbose', action='count', help='Output a log of progress and timing (to stdout). Repeat for higher verbosity level.') parser.add_argument('-p', '--numproc', type=int, default=1, help='Number of processes to run. Defaults to 1 but more recommended if available.') parser.add_argument('-f', '--force', action='store_true', help='Force file overwrite. This will overwrite both METAGENEFILE and REGRESSFILE, if they exist. 
To overwrite only REGRESSFILE ' '(and not the METAGENEFILE), do not invoke this option but simply delete REGRESSFILE.') opts = parser.parse_args() offsetfilename = os.path.join(opts.subdir, opts.offsetfile) metafilename = os.path.join(opts.subdir, opts.metagenefile) regressfilename = os.path.join(opts.subdir, opts.regressfile) if not opts.force: if os.path.exists(regressfilename): if os.path.exists(metafilename): raise IOError('%s exists; use --force to overwrite (will also recalculate metagene and overwrite %s)' % (regressfilename, metafilename)) raise IOError('%s exists; use --force to overwrite' % regressfilename) restrictbystartfilenames = [] if opts.restrictbystarts: if len(opts.restrictbystarts) > 1 and len(opts.minwstart) == 1: opts.minwstart *= len(opts.restrictbystarts) # expand the list to the same number of arguments if len(opts.minwstart) != len(opts.restrictbystarts): raise ValueError('--minwstart must be given same number of values as --restrictbystarts, or one value for all') for restrictbystart in opts.restrictbystarts: if os.path.isfile(restrictbystart): restrictbystartfilenames.append(restrictbystart) elif os.path.isdir(restrictbystart) and os.path.isfile(os.path.join(restrictbystart, opts.regressfile)): restrictbystartfilenames.append(os.path.join(restrictbystart, opts.regressfile)) else: raise IOError('Regression file/directory %s not found' % restrictbystart) if opts.verbose: sys.stdout.write(' '.join(sys.argv) + '\n') def logprint(nextstr): sys.stdout.write('[%s] %s\n' % (strftime('%Y-%m-%d %H:%M:%S'), nextstr)) sys.stdout.flush() log_lock = mp.Lock() rdlens = [] Pdict = {} with open(offsetfilename, 'rU') as infile: for line in infile: ls = line.strip().split() rdlen = int(ls[0]) for nmis in range(opts.max5mis+1): Pdict[(rdlen, nmis)] = int(ls[1])+nmis # e.g. if nmis == 1, offset as though the read were missing that base entirely rdlens.append(rdlen) # Pdict = {(int(ls[0]), nmis): int(ls[1])+nmis for ls in [line.strip().split() for line in infile] for nmis in range(opts.max5mis+1)} # Pdict = {(ls[0], nmis): ls[1] for ls in [line.strip().split() for line in infile] if opts.maxrdlen >= ls[0] >= opts.minrdlen # for nmis in range(opts.max5mis+1)} rdlens.sort() # hash transcripts by ID for easy reference later with open(opts.inbed, 'rU') as inbed: bedlinedict = {line.split()[3]: line for line in inbed} def _get_annotated_counts_by_chrom(chrom_to_do): """Accumulate counts from annotated CDSs into a metagene profile. Only the longest CDS in each transcript family will be included, and only if it meets the minimum number-of-reads requirement. 
Reads are normalized by gene, so every gene included contributes equally to the final metagene.""" found_cds = pd.read_hdf(opts.orfstore, 'all_orfs', mode='r', where="chrom == '%s' and orftype == 'annotated' and tstop > 0 and tcoord > %d and AAlen > %d" % (chrom_to_do, -startnt[0], min_AAlen), columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen']) \ .sort_values('AAlen', ascending=False).drop_duplicates('tfam') # use the longest annotated CDS in each transcript family num_cds_incl = 0 # number of CDSs included from this chromosome startprof = np.zeros((len(rdlens), startlen)) cdsprof = np.zeros((len(rdlens), 3)) stopprof = np.zeros((len(rdlens), stoplen)) inbams = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles] gnd = HashedReadBAMGenomeArray(inbams, ReadKeyMapFactory(Pdict, read_length_nmis)) for (tid, tcoord, tstop) in found_cds[['tid', 'tcoord', 'tstop']].itertuples(False): curr_trans = SegmentChain.from_bed(bedlinedict[tid]) tlen = curr_trans.get_length() if tlen >= tstop + stopnt[1]: # need to guarantee that the 3' UTR is sufficiently long curr_hashed_counts = get_hashed_counts(curr_trans, gnd) cdslen = tstop+stopnt[1]-tcoord-startnt[0] # cds length, plus the extra bases... curr_counts = np.zeros((len(rdlens), cdslen)) for (i, rdlen) in enumerate(rdlens): for nmis in range(opts.max5mis+1): curr_counts[i, :] += curr_hashed_counts[(rdlen, nmis)][tcoord+startnt[0]:tstop+stopnt[1]] # curr_counts is limited to the CDS plus any extra requested nucleotides on either side if curr_counts.sum() >= opts.mincdsreads: curr_counts /= curr_counts.mean() # normalize by mean of counts across all readlengths and positions within the CDS startprof += curr_counts[:, :startlen] cdsprof += curr_counts[:, startlen:cdslen-stoplen].reshape((len(rdlens), -1, 3)).mean(1) stopprof += curr_counts[:, cdslen-stoplen:cdslen] num_cds_incl += 1 for inbam in inbams: inbam.close() return startprof, cdsprof, stopprof, num_cds_incl def _orf_profile(orflen): """Generate a profile for an ORF based on the metagene profile Parameters ---------- orflen : int Number of nucleotides in the ORF, including the start and stop codons Returns ------- np.ndarray<float> The expected profile for the ORF. Number of rows will match the number of rows in the metagene profile. Number of columns will be orflen + stopnt[1] - startnt[0] """ assert orflen % 3 == 0 assert orflen > 0 short_stop = 9 if orflen >= startnt[1]-stopnt[0]: # long enough to include everything return np.hstack((startprof, np.tile(cdsprof, (orflen-startnt[1]+stopnt[0])/3), stopprof)) elif orflen >= startnt[1]+short_stop: return np.hstack((startprof, stopprof[:, startnt[1]-orflen-stopnt[1]:])) elif orflen >= short_stop: return np.hstack((startprof[:, :orflen-short_stop-startnt[0]], stopprof[:, -short_stop-stopnt[1]:])) else: # very short! 
return np.hstack((startprof[:, :3-startnt[0]], stopprof[:, 3-orflen-stopnt[0]:])) if opts.startonly: failure_return = (pd.DataFrame(), pd.DataFrame()) else: failure_return = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame()) def _regress_tfam(orf_set, gnd): """Performs non-negative least squares regression on all of the ORFs in a transcript family, using profiles constructed via _orf_profile() Also calculates Wald statistics for each orf and start codon, and for each stop codon if opts.startonly is False""" tfam = orf_set['tfam'].iat[0] strand = orf_set['strand'].iat[0] chrom = orf_set['chrom'].iat[0] tids = orf_set['tid'].drop_duplicates().tolist() all_tfam_genpos = set() tid_genpos = {} tlens = {} for (i, tid) in enumerate(tids): currtrans = SegmentChain.from_bed(bedlinedict[tid]) curr_pos_set = currtrans.get_position_set() tlens[tid] = len(curr_pos_set) tid_genpos[tid] = curr_pos_set all_tfam_genpos.update(curr_pos_set) tfam_segs = SegmentChain(*positionlist_to_segments(chrom, strand, list(all_tfam_genpos))) all_tfam_genpos = np.array(sorted(all_tfam_genpos)) if strand == '-': all_tfam_genpos = all_tfam_genpos[::-1] nnt = len(all_tfam_genpos) tid_indices = {tid: np.flatnonzero(np.in1d(all_tfam_genpos, list(curr_tid_genpos), assume_unique=True)) for (tid, curr_tid_genpos) in tid_genpos.iteritems()} hashed_counts = get_hashed_counts(tfam_segs, gnd) counts = np.zeros((len(rdlens), nnt), dtype=np.float64) # even though they are integer-valued, will need to do float arithmetic for (i, rdlen) in enumerate(rdlens): for nmis in range(1+opts.max5mis): counts[i, :] += hashed_counts[(rdlen, nmis)] counts = counts.ravel() if opts.startcount: # Only include ORFS for which there is at least some minimum reads within one nucleotide of the start codon offsetmat = np.tile(nnt*np.arange(len(rdlens)), 3) # offsets for each cond, expecting three positions to check for each # try: orf_set = orf_set[[(counts[(start_idxes.repeat(len(rdlens))+offsetmat)].sum() >= opts.startcount) for start_idxes in [tid_indices[tid][tcoord-1:tcoord+2] for (tid, tcoord, tstop) in orf_set[['tid', 'tcoord', 'tstop']].itertuples(False)]]] if orf_set.empty: return failure_return orf_strength_df = orf_set.sort_values('tcoord', ascending=False).drop_duplicates('orfname').reset_index(drop=True) abort_set = orf_set.drop_duplicates('gcoord').copy() abort_set['gstop'] = abort_set['gcoord'] # should maybe be +/-3, but then need to worry about splicing - and this is an easy flag abort_set['tstop'] = abort_set['tcoord']+3 # stop after the first codon abort_set['orfname'] = abort_set['gcoord'].apply(lambda x: '%s_%d_abort' % (tfam, x)) orf_strength_df = pd.concat((orf_strength_df, abort_set), ignore_index=True) if not opts.startonly: # if marking full ORFs, include histop model stop_set = orf_set.drop_duplicates('gstop').copy() stop_set['gcoord'] = stop_set['gstop'] # this is an easy flag stop_set['tcoord'] = stop_set['tstop'] # should probably be -3 nt, but this is another easy flag that distinguishes from abinit stop_set['orfname'] = stop_set['gstop'].apply(lambda x: '%s_%d_stop' % (tfam, x)) orf_strength_df = pd.concat((orf_strength_df, stop_set), ignore_index=True) orf_profs = [] indices = [] for (tid, tcoord, tstop) in orf_strength_df[['tid', 'tcoord', 'tstop']].itertuples(False): if tcoord != tstop: # not a histop tlen = tlens[tid] if tcoord+startnt[0] < 0: startadj = -startnt[0]-tcoord # number of nts to remove from the start due to short 5' UTR; guaranteed > 0 else: startadj = 0 if tstop+stopnt[1] > tlen: stopadj = tstop+stopnt[1]-tlen 
# number of nts to remove from the end due to short 3' UTR; guaranteed > 0 else: stopadj = 0 curr_indices = tid_indices[tid][tcoord+startnt[0]+startadj:tstop+stopnt[1]-stopadj] orf_profs.append(_orf_profile(tstop-tcoord)[:, startadj:tstop-tcoord+stopnt[1]-startnt[0]-stopadj].ravel()) else: # histop curr_indices = tid_indices[tid][tstop-6:tstop] orf_profs.append(stopprof[:, -6:].ravel()) indices.append(np.concatenate([nnt*i+curr_indices for i in xrange(len(rdlens))])) # need to tile the indices for each read length if len(indices[-1]) != len(orf_profs[-1]): raise AssertionError('ORF length does not match index length') orf_matrix = scipy.sparse.csc_matrix((np.concatenate(orf_profs), np.concatenate(indices), np.cumsum([0]+[len(curr_indices) for curr_indices in indices])), shape=(nnt*len(rdlens), len(orf_strength_df))) # better to make it a sparse matrix, even though nnls requires a dense matrix, because of linear algebra to come nonzero_orfs = np.flatnonzero(orf_matrix.T.dot(counts) > 0) if len(nonzero_orfs) == 0: # no possibility of anything coming up return failure_return orf_matrix = orf_matrix[:, nonzero_orfs] orf_strength_df = orf_strength_df.iloc[nonzero_orfs] # don't bother fitting ORFs with zero reads throughout their entire length (orf_strs, resid) = nnls(orf_matrix.toarray(), counts) min_str = 1e-6 # allow for machine rounding error usable_orfs = orf_strs > min_str if not usable_orfs.any(): return failure_return orf_strength_df = orf_strength_df[usable_orfs] orf_matrix = orf_matrix[:, usable_orfs] # remove entries for zero-strength ORFs or transcripts orf_strs = orf_strs[usable_orfs] orf_strength_df['orf_strength'] = orf_strs covmat = resid*resid*np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray())/(nnt*len(rdlens)-len(orf_strength_df)) # homoscedastic version (assume equal variance at all positions) # resids = counts-orf_matrix.dot(orf_strs) # simple_covmat = np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray()) # covmat = simple_covmat.dot(orf_matrix.T.dot(scipy.sparse.dia_matrix((resids*resids, 0), (len(resids), len(resids)))) # .dot(orf_matrix).dot(simple_covmat)) # # heteroscedastic version (Eicker-Huber-White robust estimator) orf_strength_df['W_orf'] = orf_strength_df['orf_strength']*orf_strength_df['orf_strength']/np.diag(covmat) orf_strength_df.set_index('orfname', inplace=True) elongating_orfs = ~(orf_strength_df['gstop'] == orf_strength_df['gcoord']) if opts.startonly: # count abortive initiation events towards start strength in this case include_starts = (orf_strength_df['tcoord'] != orf_strength_df['tstop']) if not include_starts.any(): return failure_return # no need to keep going if there weren't any useful starts gcoord_grps = orf_strength_df[include_starts].groupby('gcoord') # even if we are willing to count abinit towards start strength, we certainly shouldn't count histop covmat_starts = covmat[np.ix_(include_starts.values, include_starts.values)] orf_strs_starts = orf_strs[include_starts.values] else: if not elongating_orfs.any(): return failure_return gcoord_grps = orf_strength_df[elongating_orfs].groupby('gcoord') covmat_starts = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)] orf_strs_starts = orf_strs[elongating_orfs.values] start_strength_df = pd.DataFrame.from_items([('tfam', tfam), ('chrom', orf_set['chrom'].iloc[0]), ('strand', orf_set['strand'].iloc[0]), ('codon', gcoord_grps['codon'].first()), ('start_strength', gcoord_grps['orf_strength'].aggregate(np.sum))]) start_strength_df['W_start'] = pd.Series({gcoord: 
orf_strs_starts[rownums].dot(np.linalg.inv(covmat_starts[np.ix_(rownums, rownums)])) .dot(orf_strs_starts[rownums]) for (gcoord, rownums) in gcoord_grps.indices.iteritems()}) if not opts.startonly: # count histop towards the stop codon - but still exclude abinit include_stops = (elongating_orfs | (orf_strength_df['tcoord'] == orf_strength_df['tstop'])) gstop_grps = orf_strength_df[include_stops].groupby('gstop') covmat_stops = covmat[np.ix_(include_stops.values, include_stops.values)] orf_strs_stops = orf_strs[include_stops.values] stop_strength_df = pd.DataFrame.from_items([('tfam', tfam), ('chrom', orf_set['chrom'].iloc[0]), ('strand', orf_set['strand'].iloc[0]), ('stop_strength', gstop_grps['orf_strength'].aggregate(np.sum))]) stop_strength_df['W_stop'] = pd.Series({gstop: orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums, rownums)])) .dot(orf_strs_stops[rownums]) for (gstop, rownums) in gstop_grps.indices.iteritems()}) # # nohistop # gstop_grps = orf_strength_df[elongating_orfs].groupby('gstop') # covmat_stops = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)] # orf_strs_stops = orf_strs[elongating_orfs.values] # stop_strength_df['stop_strength_nohistop'] = gstop_grps['orf_strength'].aggregate(np.sum) # stop_strength_df['W_stop_nohistop'] = pd.Series({gstop:orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums,rownums)])) # .dot(orf_strs_stops[rownums]) for (gstop, rownums) in gstop_grps.indices.iteritems()}) return orf_strength_df, start_strength_df, stop_strength_df else: return orf_strength_df, start_strength_df def _regress_chrom(chrom_to_do): """Applies _regress_tfam() to all of the transcript families on a chromosome""" chrom_orfs = pd.read_hdf(opts.orfstore, 'all_orfs', mode='r', where="chrom == %r and tstop > 0 and tcoord > 0" % chrom_to_do, columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen', 'chrom', 'gcoord', 'gstop', 'strand', 'codon', 'orftype', 'annot_start', 'annot_stop']) # tcoord > 0 removes ORFs where the first codon is an NTG, to avoid an indexing error # Those ORFs would never get called anyway since they couldn't possibly have any reads at their start codon if opts.exclude: chrom_orfs = chrom_orfs[~chrom_orfs['tfam'].isin(opts.exclude)] if restrictbystartfilenames: restrictedstarts = pd.DataFrame() for (restrictbystart, minw) in zip(restrictbystartfilenames, opts.minwstart): restrictedstarts = restrictedstarts.append( pd.read_hdf(restrictbystart, 'start_strengths', mode='r', where="(chrom == %r) & (W_start > minw)" % chrom_to_do, columns=['tfam', 'chrom', 'gcoord', 'strand']), ignore_index=True).drop_duplicates() chrom_orfs = chrom_orfs.merge(restrictedstarts) # inner merge acts as a filter if chrom_orfs.empty: if opts.verbose > 1: with log_lock: logprint('No ORFs found on %s' % chrom_to_do) return failure_return inbams = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles] gnd = HashedReadBAMGenomeArray(inbams, ReadKeyMapFactory(Pdict, read_length_nmis)) res = tuple([pd.concat(res_dfs) for res_dfs in zip(*[_regress_tfam(tfam_set, gnd) for (tfam, tfam_set) in chrom_orfs.groupby('tfam')])]) for inbam in inbams: inbam.close() if opts.verbose > 1: with log_lock: logprint('%s complete' % chrom_to_do) return res with pd.HDFStore(opts.orfstore, mode='r') as orfstore: chroms = orfstore.select('all_orfs/meta/chrom/meta').values # because saved as categorical, this is the list of all chromosomes if os.path.isfile(metafilename) and not opts.force: if opts.verbose: logprint('Loading metagene') metagene = 
pd.read_csv(metafilename, sep='\t').set_index(['region', 'position']) metagene.columns = metagene.columns.astype(int) # they are read lengths assert (metagene.columns == rdlens).all() startprof = metagene.loc['START'] cdsprof = metagene.loc['CDS'] stopprof = metagene.loc['STOP'] startnt = (startprof.index.min(), startprof.index.max()+1) assert len(cdsprof) == 3 stopnt = (stopprof.index.min(), stopprof.index.max()+1) startprof = startprof.values.T cdsprof = cdsprof.values.T stopprof = stopprof.values.T else: if opts.verbose: logprint('Calculating metagene') startnt = (-abs(opts.startrange[0])*3, abs(opts.startrange[1])*3) # force <=0 and >= 0 for the bounds stopnt = (-abs(opts.stoprange[0])*3, abs(opts.stoprange[1])*3) if stopnt[0] >= -6: raise ValueError('STOPRANGE must encompass at least 3 codons prior to the stop') min_AAlen = (startnt[1]-stopnt[0])/3 # actually should be longer than this to ensure at least one codon in the body startlen = startnt[1]-startnt[0] stoplen = stopnt[1]-stopnt[0] workers = mp.Pool(opts.numproc) (startprof, cdsprof, stopprof, num_cds_incl) = [sum(x) for x in zip(*workers.map(_get_annotated_counts_by_chrom, chroms))] workers.close() startprof /= num_cds_incl # technically not necessary, but helps for consistency of units across samples cdsprof /= num_cds_incl stopprof /= num_cds_incl pd.concat((pd.DataFrame(data=startprof.T, index=pd.MultiIndex.from_tuples([('START', x) for x in range(*startnt)], names=['region', 'position']), columns=pd.Index(rdlens, name='rdlen')), pd.DataFrame(data=cdsprof.T, index=pd.MultiIndex.from_tuples([('CDS', x) for x in range(3)], names=['region', 'position']), columns=pd.Index(rdlens, name='rdlen')), pd.DataFrame(data=stopprof.T, index=pd.MultiIndex.from_tuples([('STOP', x) for x in range(*stopnt)], names=['region', 'position']), columns=pd.Index(rdlens, name='rdlen')))) \ .to_csv(metafilename, sep='\t') catfields = ['chrom', 'strand', 'codon', 'orftype'] if not opts.noregress: if opts.verbose: logprint('Calculating regression results by chromosome') workers = mp.Pool(opts.numproc) if opts.startonly: (orf_strengths, start_strengths) = \ [pd.concat(res_dfs).reset_index() for res_dfs in zip(*workers.map(_regress_chrom, chroms))] if opts.verbose: logprint('Saving results') for catfield in catfields: if catfield in start_strengths.columns: start_strengths[catfield] = start_strengths[catfield].astype('category') # saves disk space and read/write time if catfield in orf_strengths.columns: orf_strengths[catfield] = orf_strengths[catfield].astype('category') # saves disk space and read/write time with pd.HDFStore(regressfilename, mode='w') as outstore: outstore.put('orf_strengths', orf_strengths, format='t', data_columns=True) outstore.put('start_strengths', start_strengths, format='t', data_columns=True) else: (orf_strengths, start_strengths, stop_strengths) = \ [pd.concat(res_dfs).reset_index() for res_dfs in zip(*workers.map(_regress_chrom, chroms))] if opts.verbose: logprint('Saving results') for catfield in catfields: if catfield in start_strengths.columns: start_strengths[catfield] = start_strengths[catfield].astype('category') # saves disk space and read/write time if catfield in orf_strengths.columns: orf_strengths[catfield] = orf_strengths[catfield].astype('category') # saves disk space and read/write time if catfield in stop_strengths.columns: stop_strengths[catfield] = stop_strengths[catfield].astype('category') # saves disk space and read/write time with pd.HDFStore(regressfilename, mode='w') as outstore: 
outstore.put('orf_strengths', orf_strengths, format='t', data_columns=True) outstore.put('start_strengths', start_strengths, format='t', data_columns=True) outstore.put('stop_strengths', stop_strengths, format='t', data_columns=True) workers.close() if opts.verbose: logprint('Tasks complete')
bsd-3-clause
shishaochen/TensorFlow-0.8-Win
tensorflow/contrib/learn/python/learn/datasets/__init__.py
3
1803
"""Module inclues reference datasets and utilities to load datasets.""" # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import collections from os import path import numpy as np from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.datasets import mnist from tensorflow.contrib.learn.python.learn.datasets import text_datasets # Export load_iris and load_boston. load_iris = base.load_iris load_boston = base.load_boston # List of all available datasets. # Note, currently they may return different types. DATASETS = { # Returns base.Dataset. 'iris': base.load_iris, 'boston': base.load_boston, # Returns base.Datasets (train/validation/test sets). 'mnist': mnist.load_mnist, 'dbpedia': text_datasets.load_dbpedia, } def load_dataset(name): """Loads dataset by name. Args: name: Name of the dataset to load. Returns: Features and targets for given dataset. Can be numpy or iterator. """ if name not in DATASETS: raise ValueError('Name of dataset is not found: %s' % name) return DATASETS[name]()
apache-2.0
procoder317/scikit-learn
examples/decomposition/plot_incremental_pca.py
243
1878
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ print(__doc__) # Authors: Kyle Kastner # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names): plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1], c=c, label=target_name) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error " "%.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best") plt.axis([-4, 4, -1.5, 1.5]) plt.show()
bsd-3-clause
ychfan/tensorflow
tensorflow/python/keras/_impl/keras/datasets/cifar100.py
15
2129
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np

from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file


def load_data(label_mode='fine'):
  """Loads CIFAR100 dataset.

  Arguments:
      label_mode: one of "fine", "coarse".

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case of invalid `label_mode`.
  """
  if label_mode not in ['fine', 'coarse']:
    raise ValueError('label_mode must be one of "fine" "coarse".')

  dirname = 'cifar-100-python'
  origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  fpath = os.path.join(path, 'train')
  x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')

  fpath = os.path.join(path, 'test')
  x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
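# Example usage (editor's sketch, not part of the original module):
#
#     (x_train, y_train), (x_test, y_test) = load_data(label_mode='fine')
#     print(x_train.shape)  # (50000, 32, 32, 3) when image_data_format() is 'channels_last'
#     print(y_train.shape)  # (50000, 1)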
apache-2.0
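A usage sketch for the loader above. The import path mirrors the internal _impl module shown in this file rather than the public tf.keras namespace, and the printed shapes assume the standard CIFAR-100 archive with a 'channels_last' image data format:

# Sketch: loading CIFAR-100 with coarse (20 superclass) labels.
from tensorflow.python.keras._impl.keras.datasets import cifar100

(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='coarse')

print(x_train.shape, y_train.shape)  # expected: (50000, 32, 32, 3) (50000, 1)
print(x_test.shape, y_test.shape)    # expected: (10000, 32, 32, 3) (10000, 1)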
LarsDu/DeepNuc
deepnuc/nucbinaryclassifier.py
2
15464
import tensorflow as tf import numpy as np import sklearn.metrics as metrics #from databatcher import DataBatcher import nucconvmodel #import dubiotools as dbt import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pprint from itertools import cycle import os import sys #Logging imports from logger import Logger from nucinference import NucInference from collections import OrderedDict class NucBinaryClassifier(NucInference): use_onehot_labels = True def __init__(self, sess, train_batcher, test_batcher, num_epochs, learning_rate, batch_size, seq_len, save_dir, keep_prob=0.5, beta1=0.9, concat_revcom_input=False, nn_method_key="inferenceA", pos_index=1): """NucBinaryClassifier encapsulates training and data evaluation for :param sess: tf.Session() object :param train_batcher: DataBatcher object for training set :param test_batcher: DataBatcher object for test set :param num_epochs: Number of epoch cycles to perform training :param learning_rate: Learning rate :param batch_size: Mini-batch pull size :param seq_len: Sequence length :param save_dir: Root save directory for binary classification model :param keep_prob: Probability of keeping weight for dropout regularization :param beta1: Beta1 parameter for AdamOptimizer :param concat_revcom_input: If true, concatenate reverse complement of nucleotide sequence to input vector :param nn_method_key: Dictionary key for inference method found in nucconvmodels.py file. Determines which model to use. Example: "inferenceA" will run nucconvmodels.inferenceA :param pos_index: The index to use for the positive class (defaults to 1) :returns: a NucBinaryClassifier object :rtype: NucBinaryClassifier """ super(NucBinaryClassifier, self).__init__(sess, train_batcher, test_batcher, num_epochs, learning_rate, batch_size, seq_len, save_dir, keep_prob, beta1, concat_revcom_input, nn_method_key="inferenceA") if self.train_batcher.num_classes != 2: print "Error, more than two classes detected in train batcher" else: self.num_classes = 2 #The index for the label that should be considered the positive class self.pos_index=pos_index self.save_on_epoch = 5 def build_model(self): self.dna_seq_placeholder = tf.placeholder(tf.float32, shape=[None,self.seq_len,4], name="dna_seq") self.labels_placeholder = tf.placeholder(tf.float32, shape=[None, self.num_classes], name="labels") self.keep_prob_placeholder = tf.placeholder(tf.float32,name="keep_prob") self.logits, self.network = self.nn_method(self.dna_seq_placeholder, self.keep_prob_placeholder, self.num_classes) self.probs = tf.nn.softmax(self.logits) self.loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=self.logits)) ''' Calculate metrics. num_true positives is the number of true positives for the current batch Table below shows index if tf.argmax is applied +-----+-----------+---------+ | | Classifier| Label | +-----+-----------+---------+ | TP | 1 | 1 | +-----+-----------+---------+ | FP | 1 | 0 | +-----+-----------+---------+ | TN | 0 | 0 | +-----+-----------+---------+ | FN | 0 | 1 | +-----+-----------+---------+ Precision = TP/(TP+FP) Recall = TP/(TP+FN) F1-score = 2*(Prec*Rec)/(Prec+Rec) # Note: I ended up not using the tp,fp,tn,fn ops because I ended up calculating # these metrics using sklearn. ''' #correct = TN+TP #Used for calculating accuracy self.logits_ind = tf.argmax(self.logits,1) self.labels_ind = tf.argmax(self.labels_placeholder,1) #Create max_mask of logits (ie: [-.5,.5] --> [0 1]. 
Note logits have # shape [batch_size * num_classes= 2] #self.inverse_logits_col = tf.ones_like(self.logits_ind) - self.logits_ind #self.max_mask_logits = tf.concat([self.inverse_logits_col,self.logits_ind],1) #True positives where logits_ind+labels_ind == 2 #True negatives where logits_ind+labels_ind == 0 self.sum_ind = tf.add(self.logits_ind,self.labels_ind) self.true_positives = tf.equal(self.sum_ind,2*tf.ones_like(self.sum_ind)) #bool self.num_true_positives =tf.reduce_sum(tf.cast(self.true_positives, tf.int32)) #For FP classifier index > label index self.false_positives=tf.greater(self.logits_ind,self.labels_ind) self.num_false_positives = tf.reduce_sum(tf.cast(self.false_positives, tf.int32)) self.true_negatives = tf.equal(self.sum_ind,tf.zeros_like(self.sum_ind)) #bool self.num_true_negatives= tf.reduce_sum(tf.cast(self.true_negatives,tf.int32)) #For FN classifier index < label index self.false_negatives=tf.less(self.logits_ind,self.labels_ind) self.num_false_negatives = tf.reduce_sum(tf.cast(self.false_negatives,tf.int32)) #num correct can be used to calculate accuracy self.correct = tf.equal(self.logits_ind,self.labels_ind) self.num_correct= tf.reduce_sum(tf.cast(self.correct, tf.int32)) self.relevance =self.network.relevance_backprop(tf.multiply(self.logits, self.labels_placeholder)) '''Write and consolidate summaries''' self.loss_summary = tf.summary.scalar('loss',self.loss) self.summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph) self.summary_op = tf.summary.merge([self.loss_summary]) #Note: Do not use tf.summary.merge_all() here. This will break encapsulation for # cross validation and lead to crashes when training multiple models # Add gradient ops to graph with learning rate self.train_op = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1).minimize(self.loss) self.vars = tf.trainable_variables() self.var_names = [var.name for var in self.vars] #print "Trainable variables:\n" #for vname in self.var_names: # print vname self.saver = tf.train.Saver() self.init_op = tf.global_variables_initializer() #Important note: Restoring model does not require init_op. #In fact calling tf.global_variables_initializer() after loading a model #will overwrite loaded weights self.sess.run(self.init_op) self.load(self.checkpoint_dir) def eval_model_metrics(self, batcher, save_plots=False, image_name ='metrics.png', eval_batch_size=50): """ Note: This method only works for binary classification as auPRC and auROC graphs only apply to binary classificaton problems. TODO: Modify this code to perform auROC generation for one-vs-all in the case of multiclass classification. 
""" #Ref: http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics ##auROC calculations #Keep batch size at 1 for now to ensure 1 full epoch is evaluated all_labels = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32) all_probs = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32) #num_correct = 0 #counts number of correct predictions num_whole_pulls = batcher.num_records//eval_batch_size num_single_pulls = batcher.num_records%eval_batch_size num_steps = num_whole_pulls+num_single_pulls for i in range(num_steps): if i<num_whole_pulls: batch_size=eval_batch_size else: batch_size=1 labels_batch, dna_seq_batch = batcher.pull_batch(batch_size) feed_dict = { self.dna_seq_placeholder:dna_seq_batch, self.labels_placeholder:labels_batch, self.keep_prob_placeholder:1.0 } cur_prob= self.sess.run(self.probs,feed_dict=feed_dict) #Fill labels array if batch_size > 1: start_ind = batch_size*i elif batch_size == 1: start_ind = num_whole_pulls*eval_batch_size+(i-num_whole_pulls) else: print "Never reach this condition" all_labels[start_ind:start_ind+batch_size,:] = labels_batch all_probs[start_ind:start_ind+batch_size,:] = cur_prob #Calculate metrics and save results in a dict md = self.calc_classifier_metrics(all_labels,all_probs) md["epoch"]=self.epoch md["step"]=self.step #print "Testing accuracy",float(num_correct)/float(batcher.num_records) print 'Num examples: %d Num correct: %d Accuracy: %0.04f' % \ (batcher.num_records, md["num_correct"], md["accuracy"])+'\n' if save_plots: ###Plot some metrics plot_colors = cycle(['cyan','blue','orange','teal']) #print "Labels shape",all_labels.shape #print "Probs shape",all_probs.shape #print "Preds shape",all_preds.shape #Generate auROC plot axes fig1,ax1 = plt.subplots(2) fig1.subplots_adjust(bottom=0.2) ax1[0].plot([0,1],[0,1],color='navy',lw=2,linestyle='--') ax1[0].set_xbound(0.0,1.0) ax1[0].set_ybound(0.0,1.05) ax1[0].set_xlabel('False Positive Rate') ax1[0].set_ylabel('True Positive Rate') ax1[0].set_title('auROC') #plt.legend(loc='lower right') ax1[0].plot(md["fpr"],md["tpr"],color=plot_colors.next(), lw=2,linestyle='-',label='auROC curve (area=%0.2f)' % md["auroc"] ) #Generate auPRC plot axes #ax1[1].plot([0,1],[1,1],color='royalblue',lw=2,linestyle='--') ax1[1].set_xlabel('Precision') ax1[1].set_ylabel('Recall') ax1[1].set_title('auPRC') ax1[1].plot(md["thresh_precision"],md["thresh_recall"],color=plot_colors.next(), lw=2,linestyle='-',label='auPRC curve (area=%0.2f)' % md["auprc"] ) ax1[1].set_xbound(0.0,1.0) ax1[1].set_ybound(0.0,1.05) #Note: avg prec score is the area under the prec recall curve #Note: Presumably class 1 (pos examples) should be the only f1 score we focus on #print "F1 score for class",i,"is",f1_score plt.tight_layout() plt_fname = self.save_dir+os.sep+image_name print "Saving auROC image to",plt_fname fig1.savefig(plt_fname) #Return metrics dictionary return md def calc_classifier_metrics(self,all_labels,all_probs): """Calculate some metrics for the dataset return dictionary with metrics :param all_probs: nx2 prob values :param all_labels: nx2 labels :returns: dictionary of metrics :rtype: dict() """ num_records = all_probs.shape[0] all_preds = np.zeros((num_records, self.num_classes),dtype = np.float32) all_preds[np.arange(num_records),all_probs.argmax(1)] = 1 #Calculate accuracy num_correct = metrics.accuracy_score(all_labels[:,self.pos_index],all_preds[:,self.pos_index],normalize=False) accuracy = num_correct/float(all_preds.shape[0]) ###Calculate auROC 
#http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html #metrics.roc_curve(y_true, y_score[, ...]) #y_score is probs fpr,tpr,_ = metrics.roc_curve(all_labels[:,self.pos_index], all_probs[:,self.pos_index], pos_label=self.pos_index) auroc = metrics.auc(fpr,tpr) thresh_precision,thresh_recall,prc_thresholds = metrics.precision_recall_curve( all_labels[:,self.pos_index], all_probs[:,self.pos_index]) #Calculate precision, recall, and f1-score for threshold = 0.5 #confusion_matrix = metrics.confusion_matrix(all_labels[:,self.pos_index],all_probs[:,self.pos_index]) precision, recall, f1_score, support = metrics.precision_recall_fscore_support( all_labels[:,self.pos_index], all_preds[:,self.pos_index], pos_label=self.pos_index) precision = precision[self.pos_index] recall = recall[self.pos_index] f1_score = f1_score[self.pos_index] support = support[self.pos_index] auprc = metrics.average_precision_score(all_labels[:,self.pos_index], all_probs[:,self.pos_index]) return OrderedDict([ ("num_correct",num_correct), ("accuracy",accuracy), ("auroc",auroc), ("auprc",auprc), ("fpr",fpr), ("tpr",tpr), ("precision",precision), ("recall",recall), ("f1_score",f1_score), ("support",support), ("thresh_precision",thresh_precision), ("thresh_recall",thresh_recall), ("prc_thresholds",prc_thresholds) ])
gpl-3.0
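The build_model comment above defines TP/FP/TN/FN in terms of argmax indices and gives the precision, recall and F1 formulas. A small NumPy sketch of the same bookkeeping on hypothetical label and prediction index arrays (these arrays are illustrative and not part of the original code):

# Sketch: the confusion counts described in the build_model comment, computed
# with NumPy on argmax indices (1 = positive class, 0 = negative class).
import numpy as np

labels_ind = np.array([1, 1, 0, 0, 1, 0])   # ground-truth class indices
logits_ind = np.array([1, 0, 0, 1, 1, 0])   # predicted class indices

tp = np.sum((logits_ind + labels_ind) == 2)   # predicted 1, label 1
tn = np.sum((logits_ind + labels_ind) == 0)   # predicted 0, label 0
fp = np.sum(logits_ind > labels_ind)          # predicted 1, label 0
fn = np.sum(logits_ind < labels_ind)          # predicted 0, label 1

precision = tp / float(tp + fp)
recall = tp / float(tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print(tp, fp, tn, fn, precision, recall, f1)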
darioml/fyp-public
tests/backprop__.py
1
2074
__author__ = 'darioml'

# Common library for backprop testing

from backprop.nn_scipy_opti import NN_1HL
import numpy as np
from sklearn import cross_validation
from sklearn.metrics import accuracy_score,confusion_matrix
import matplotlib.pyplot as plt
from pprint import pprint
import scipy.io
import time
import datetime
import os


def get_data(filename='../data/mat/ball.mat', flatten=True):
    ## Let's get our data
    data_file = scipy.io.loadmat(filename)
    data = np.array(data_file['X'])
    labels = np.array(data_file['Y'], 'uint8').T

    if flatten is True:
        labels = labels.flatten()

    return labels, data/255


def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(4)
    plt.xticks(tick_marks, range(4))  #, rotation=45
    plt.yticks(tick_marks, range(4))
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


def debug_print(debug, statement):
    if debug is True:
        print statement


def test_simple_backprop(data, labels, hidden_nodes, iterations, maxiter=200, debug=False, plot=False):
    times = []
    accuracy = []

    debug_print(debug, 'Starting iteration - Nodes: %i, Reps: %i, Max Iters: %i' % (hidden_nodes, iterations, maxiter))

    for i in range(iterations):
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(data, labels, test_size=0.2)

        nn = NN_1HL(maxiter=maxiter, hidden_layer_size=hidden_nodes)

        time_now = time.time()
        nn.fit(X_train, y_train)
        times.append( time.time() - time_now )

        accuracy.append(accuracy_score(y_test, nn.predict(X_test)))
        debug_print(debug, ' Finished %i of %i' % (i, iterations))

    return np.mean(accuracy),np.mean(times),accuracy,times


def save(filename, __file_append=datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H%M%S') + "_", **kwargs):
    scipy.io.savemat('../results/' + __file_append + filename, kwargs)
gpl-2.0
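test_simple_backprop above relies on sklearn.cross_validation, which newer scikit-learn releases replaced with sklearn.model_selection. A sketch of the same split/fit/time/score loop against the current API; MLPClassifier and load_digits are stand-ins for the project-specific NN_1HL model and .mat data, not part of the original repository:

# Sketch: cross-validated accuracy and fit-time averaging with modern scikit-learn.
import time
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neural_network import MLPClassifier

X, y = load_digits(return_X_y=True)
times, accuracy = [], []
for _ in range(3):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    clf = MLPClassifier(hidden_layer_sizes=(25,), max_iter=200)
    start = time.time()
    clf.fit(X_train, y_train)
    times.append(time.time() - start)
    accuracy.append(accuracy_score(y_test, clf.predict(X_test)))

print(np.mean(accuracy), np.mean(times))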
lisa-lab/pylearn2
pylearn2/training_algorithms/sgd.py
32
48169
""" Stochastic Gradient Descent and related functionality such as learning rate adaptation, momentum, and Polyak averaging. """ from __future__ import division __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow, David Warde-Farley"] __license__ = "3-clause BSD" __maintainer__ = "David Warde-Farley" __email__ = "pylearn-dev@googlegroups" import logging import warnings import numpy as np from theano.compat import six from theano import config from theano import function from theano.gof.op import get_debug_values from pylearn2.compat import OrderedDict, first_key from pylearn2.monitor import Monitor from pylearn2.space import CompositeSpace, NullSpace from pylearn2.train_extensions import TrainExtension from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm from pylearn2.training_algorithms.learning_rule import Momentum from pylearn2.training_algorithms.learning_rule import ( MomentumAdjustor as LRMomentumAdjustor) from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size from pylearn2.utils import py_integer_types, py_float_types from pylearn2.utils import safe_zip from pylearn2.utils import serial from pylearn2.utils import sharedX from pylearn2.utils import contains_nan from pylearn2.utils import contains_inf from pylearn2.utils import isfinite from pylearn2.utils.data_specs import DataSpecsMapping from pylearn2.utils.exc import reraise_as from pylearn2.utils.timing import log_timing from pylearn2.utils.rng import make_np_rng log = logging.getLogger(__name__) class SGD(TrainingAlgorithm): """ SGD = (Minibatch) Stochastic Gradient Descent. A TrainingAlgorithm that does stochastic gradient descent on minibatches of training examples. For theoretical background on this algorithm, see Yoshua Bengio's machine learning course notes on the subject: http://www.iro.umontreal.ca/~pift6266/H10/notes/gradient.html Parameters ---------- learning_rate : float The learning rate to use. Train object callbacks can change the learning rate after each epoch. SGD update_callbacks can change it after each minibatch. cost : pylearn2.costs.cost.Cost, optional Cost object specifying the objective function to be minimized. Optionally, may be None. In this case, SGD will call the model's get_default_cost method to obtain the objective function. batch_size : int, optional The size of the batch to be used. If not specified, the model will be asked for the batch size, so you must have specified the batch size there. (Some models are rigidly defined to only work with one batch size) monitoring_batch_size : int, optional The size of the monitoring batches. monitoring_batches : int, optional At the start of each epoch, we run "monitoring", to evaluate quantities such as the validation set error. monitoring_batches, if specified, determines the number of batches to draw from the iterator for each monitoring dataset. Unnecessary if not using monitoring or if `monitor_iteration_mode` is 'sequential' and `batch_size` is specified (number of batches will be calculated based on full dataset size). TODO: make it possible to specify different monitoring_batches for each monitoring dataset. The Monitor itself already supports this. monitoring_dataset : Dataset or dictionary, optional If not specified, no monitoring is used. If specified to be a Dataset, monitor on that Dataset. If specified to be dictionary, the keys should be string names of datasets, and the values should be Datasets. 
All monitoring channels will be computed for all monitoring Datasets and will have the dataset name and an underscore prepended to them. monitor_iteration_mode : str, optional The iteration mode used to iterate over the examples in all monitoring datasets. If not specified, defaults to 'sequential'. TODO: make it possible to specify different modes for different datasets. termination_criterion : instance of \ pylearn2.termination_criteria.TerminationCriterion, optional Used to determine when the algorithm should stop running. If not specified, runs forever--or more realistically, until external factors halt the python process (Kansas 1977). update_callbacks : list, optional If specified, each member of the list should be a callable that accepts an SGD instance as its only argument. All callbacks will be called with this SGD instance after each SGD step. learning_rule : training_algorithms.learning_rule.LearningRule, optional A learning rule computes the new parameter values given old parameters and first-order gradients. If learning_rule is None, sgd.SGD will update parameters according to the standard SGD learning rule: .. code-block:: none param := param - learning_rate * d cost / d param This argument allows more sophisticated learning rules, such as SGD with momentum. set_batch_size : bool, optional Defaults to False. If True, and batch_size conflicts with model.force_batch_size, will call model.set_batch_size(batch_size) in an attempt to change model.force_batch_size train_iteration_mode : str, optional Defaults to 'shuffled_sequential'. The iteration mode to use for iterating through training examples. batches_per_iter : int, optional The number of batches to draw from the iterator over training examples. If iteration mode is 'sequential' or 'shuffled_sequential', this is unnecessary; when unspecified we will iterate over all examples. theano_function_mode : a valid argument to theano.function's \ 'mode' parameter, optional The theano mode to compile the updates function with. Note that pylearn2 includes some wraplinker modes that are not bundled with theano. See pylearn2.devtools. These extra modes let you do things like check for NaNs at every step, or record md5 digests of all computations performed by the update function to help isolate problems with nondeterminism. monitoring_costs : OrderedDict, optional A dictionary of Cost instances. Keys should be string containing the name of the cost. The Monitor will also include all channels defined by these Costs, even though we don't train using them. seed : valid argument to np.random.RandomState, optional The seed used for the random number generate to be passed to the training dataset iterator (if any) """ def __init__(self, learning_rate, cost=None, batch_size=None, monitoring_batch_size=None, monitoring_batches=None, monitoring_dataset=None, monitor_iteration_mode='sequential', termination_criterion=None, update_callbacks=None, learning_rule=None, set_batch_size=False, train_iteration_mode=None, batches_per_iter=None, theano_function_mode=None, monitoring_costs=None, seed=[2012, 10, 5]): if isinstance(cost, (list, tuple, set)): raise TypeError("SGD no longer supports using collections of " + "Costs to represent a sum of Costs. 
Use " + "pylearn2.costs.cost.SumOfCosts instead.") self.learning_rule = learning_rule self.learning_rate = sharedX(learning_rate, 'learning_rate') self.cost = cost self.batch_size = batch_size self.set_batch_size = set_batch_size self.batches_per_iter = batches_per_iter self._set_monitoring_dataset(monitoring_dataset) self.monitoring_batch_size = monitoring_batch_size self.monitoring_batches = monitoring_batches self.monitor_iteration_mode = monitor_iteration_mode if monitoring_dataset is None: if monitoring_batch_size is not None: raise ValueError("Specified a monitoring batch size " + "but not a monitoring dataset.") if monitoring_batches is not None: raise ValueError("Specified an amount of monitoring batches " + "but not a monitoring dataset.") self.termination_criterion = termination_criterion self._register_update_callbacks(update_callbacks) if train_iteration_mode is None: train_iteration_mode = 'shuffled_sequential' self.train_iteration_mode = train_iteration_mode self.first = True self.rng = make_np_rng(seed, which_method=["randn", "randint"]) self.theano_function_mode = theano_function_mode self.monitoring_costs = monitoring_costs def _setup_monitor(self): """ Set up monitor to model the objective value, learning rate, momentum (if applicable), and extra channels defined by the cost. This method must be called after `learning_rule.get_updates`, since it may have an effect on `learning_rule.add_channels_to_monitor` (that is currently the case for `learning_rule.RMSProp`). """ if bool(self.monitoring_dataset): if (self.monitoring_batch_size is None and self.monitoring_batches is None): self.monitoring_batch_size = self.batch_size self.monitoring_batches = self.batches_per_iter self.monitor.setup(dataset=self.monitoring_dataset, cost=self.cost, batch_size=self.monitoring_batch_size, num_batches=self.monitoring_batches, extra_costs=self.monitoring_costs, mode=self.monitor_iteration_mode) dataset_name = first_key(self.monitoring_dataset) monitoring_dataset = self.monitoring_dataset[dataset_name] # TODO: have Monitor support non-data-dependent channels self.monitor.add_channel(name='learning_rate', ipt=None, val=self.learning_rate, data_specs=(NullSpace(), ''), dataset=monitoring_dataset) if self.learning_rule: self.learning_rule.add_channels_to_monitor( self.monitor, monitoring_dataset) def setup(self, model, dataset): """ Compiles the theano functions needed for the train method. 
Parameters ---------- model : a Model instance dataset : Dataset """ if self.cost is None: self.cost = model.get_default_cost() inf_params = [param for param in model.get_params() if contains_inf(param.get_value())] if len(inf_params) > 0: raise ValueError("These params are Inf: "+str(inf_params)) if any([contains_nan(param.get_value()) for param in model.get_params()]): nan_params = [param for param in model.get_params() if contains_nan(param.get_value())] raise ValueError("These params are NaN: "+str(nan_params)) self.model = model self._synchronize_batch_size(model) model._test_batch_size = self.batch_size self.monitor = Monitor.get_monitor(model) self.monitor._sanity_check() # test if force batch size and batch size has_force_batch_size = getattr(model, "force_batch_size", False) train_dataset_is_uneven = \ dataset.get_num_examples() % self.batch_size != 0 has_monitoring_datasets = bool(self.monitoring_dataset) if has_monitoring_datasets: monitoring_datasets_are_uneven = \ any(d.get_num_examples() % self.batch_size != 0 for d in self.monitoring_dataset.values()) else: monitoring_datasets_are_uneven = False # or True it doesn't matter if has_force_batch_size and train_dataset_is_uneven and \ not has_uniform_batch_size(self.train_iteration_mode): raise ValueError("Dataset size is not a multiple of batch size." "You should set train_iteration_mode (and " "maybe monitor_iteration_mode) to " "even_sequential, even_shuffled_sequential or " "even_batchwise_shuffled_sequential") if has_force_batch_size and has_monitoring_datasets and \ monitoring_datasets_are_uneven and \ not has_uniform_batch_size(self.monitor_iteration_mode): raise ValueError("Dataset size is not a multiple of batch size." "You should set monitor_iteration_mode to " "even_sequential, even_shuffled_sequential or " "even_batchwise_shuffled_sequential") data_specs = self.cost.get_data_specs(self.model) mapping = DataSpecsMapping(data_specs) space_tuple = mapping.flatten(data_specs[0], return_tuple=True) source_tuple = mapping.flatten(data_specs[1], return_tuple=True) # Build a flat tuple of Theano Variables, one for each space. # We want that so that if the same space/source is specified # more than once in data_specs, only one Theano Variable # is generated for it, and the corresponding value is passed # only once to the compiled Theano function. theano_args = [] for space, source in safe_zip(space_tuple, source_tuple): name = '%s[%s]' % (self.__class__.__name__, source) arg = space.make_theano_batch(name=name, batch_size=self.batch_size) theano_args.append(arg) theano_args = tuple(theano_args) # Methods of `self.cost` need args to be passed in a format compatible # with data_specs nested_args = mapping.nest(theano_args) fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args) self.on_load_batch = fixed_var_descr.on_load_batch cost_value = self.cost.expr(model, nested_args, ** fixed_var_descr.fixed_vars) if cost_value is not None and cost_value.name is None: # Concatenate the name of all tensors in theano_args !? cost_value.name = 'objective' learning_rate = self.learning_rate params = list(model.get_params()) assert len(params) > 0 for i, param in enumerate(params): if param.name is None: param.name = 'sgd_params[%d]' % i grads, updates = self.cost.get_gradients(model, nested_args, ** fixed_var_descr.fixed_vars) if not isinstance(grads, OrderedDict): raise TypeError(str(type(self.cost)) + ".get_gradients returned " + "something with" + str(type(grads)) + "as its " + "first member. 
Expected OrderedDict.") for param in grads: assert param in params for param in params: assert param in grads for param in grads: if grads[param].name is None and cost_value is not None: grads[param].name = ('grad(%(costname)s, %(paramname)s)' % {'costname': cost_value.name, 'paramname': param.name}) assert grads[param].dtype == param.dtype lr_scalers = model.get_lr_scalers() for key in lr_scalers: if key not in params: raise ValueError( "Tried to scale the learning rate on " + str(key) + " which is not an optimization parameter.") log.info('Parameter and initial learning rate summary:') for param in params: param_name = param.name if param_name is None: param_name = 'anon_param' lr = learning_rate.get_value() * lr_scalers.get(param, 1.) log.info('\t' + param_name + ': ' + str(lr)) if self.learning_rule: updates.update(self.learning_rule.get_updates( learning_rate, grads, lr_scalers)) else: # Use standard SGD updates with fixed learning rate. updates.update(dict(safe_zip(params, [param - learning_rate * lr_scalers.get(param, 1.) * grads[param] for param in params]))) for param in params: if updates[param].name is None: updates[param].name = 'sgd_update(' + param.name + ')' model.modify_updates(updates) for param in params: update = updates[param] if update.name is None: update.name = 'censor(sgd_update(' + param.name + '))' for update_val in get_debug_values(update): if contains_inf(update_val): raise ValueError("debug value of %s contains infs" % update.name) if contains_nan(update_val): raise ValueError("debug value of %s contains nans" % update.name) # Set up monitor to model the objective value, learning rate, # momentum (if applicable), and extra channels defined by # the cost. # We have to do that after learning_rule.get_updates has been # called, since it may have an effect on # learning_rule.add_channels_to_monitor (that is currently the case # for AdaDelta and RMSProp). self._setup_monitor() with log_timing(log, 'Compiling sgd_update'): self.sgd_update = function(theano_args, updates=updates, name='sgd_update', on_unused_input='ignore', mode=self.theano_function_mode) self.params = params def train(self, dataset): """ Runs one epoch of SGD training on the specified dataset. Parameters ---------- dataset : Dataset """ if not hasattr(self, 'sgd_update'): raise Exception("train called without first calling setup") # Make sure none of the parameters have bad values for param in self.params: value = param.get_value(borrow=True) if not isfinite(value): raise RuntimeError("NaN in " + param.name) self.first = False rng = self.rng if not is_stochastic(self.train_iteration_mode): rng = None data_specs = self.cost.get_data_specs(self.model) # The iterator should be built from flat data specs, so it returns # flat, non-redundent tuples of data. mapping = DataSpecsMapping(data_specs) space_tuple = mapping.flatten(data_specs[0], return_tuple=True) source_tuple = mapping.flatten(data_specs[1], return_tuple=True) if len(space_tuple) == 0: # No data will be returned by the iterator, and it is impossible # to know the size of the actual batch. # It is not decided yet what the right thing to do should be. raise NotImplementedError( "Unable to train with SGD, because " "the cost does not actually use data from the data set. 
" "data_specs: %s" % str(data_specs)) flat_data_specs = (CompositeSpace(space_tuple), source_tuple) iterator = dataset.iterator(mode=self.train_iteration_mode, batch_size=self.batch_size, data_specs=flat_data_specs, return_tuple=True, rng=rng, num_batches=self.batches_per_iter) on_load_batch = self.on_load_batch for batch in iterator: for callback in on_load_batch: callback(*batch) self.sgd_update(*batch) # iterator might return a smaller batch if dataset size # isn't divisible by batch_size # Note: if data_specs[0] is a NullSpace, there is no way to know # how many examples would actually have been in the batch, # since it was empty, so actual_batch_size would be reported as 0. actual_batch_size = flat_data_specs[0].np_batch_size(batch) self.monitor.report_batch(actual_batch_size) for callback in self.update_callbacks: callback(self) # Make sure none of the parameters have bad values for param in self.params: value = param.get_value(borrow=True) if not isfinite(value): raise RuntimeError("NaN in " + param.name) def continue_learning(self, model): """ Returns True if the algorithm should continue running, or False if it has reached convergence / started overfitting and should stop. Parameters ---------- model : a Model instance """ if self.termination_criterion is None: return True else: return self.termination_criterion.continue_learning(self.model) class MonitorBasedLRAdjuster(TrainExtension): """ A TrainExtension that uses the on_monitor callback to adjust the learning rate on each epoch. It pulls out a channel from the model's monitor and adjusts the learning rate based on what happened to the monitoring channel on the last epoch. If the channel is greater than high_trigger times its previous value, the learning rate will be scaled by shrink_amt (which should be < 1 for this scheme to make sense). The idea is that in this case the learning algorithm is overshooting the bottom of the objective function. If the objective is less than high_trigger but greater than low_trigger times its previous value, the learning rate will be scaled by grow_amt (which should be > 1 for this scheme to make sense). The idea is that the learning algorithm is making progress but at too slow of a rate. Parameters ---------- high_trigger : float, optional See class-level docstring low_trigger : float, optional See class-level docstring grow_amt : float, optional See class-level docstring min_lr : float, optional All updates to the learning rate are clipped to be at least this value. max_lr : float, optional All updates to the learning rate are clipped to be at most this value. dataset_name : str, optional If specified, use dataset_name + "_objective" as the channel to guide the learning rate adaptation. channel_name : str, optional If specified, use channel_name as the channel to guide the learning rate adaptation. Conflicts with dataset_name. 
If neither dataset_name nor channel_name is specified, uses "objective" """ def __init__(self, high_trigger=1., shrink_amt=.99, low_trigger=.99, grow_amt=1.01, min_lr=1e-7, max_lr=1., dataset_name=None, channel_name=None): self.high_trigger = high_trigger self.shrink_amt = shrink_amt self.low_trigger = low_trigger self.grow_amt = grow_amt self.min_lr = min_lr self.max_lr = max_lr self.dataset_name = None if channel_name is not None: self.channel_name = channel_name else: if dataset_name is not None: self.channel_name = dataset_name + '_objective' self.dataset_name = dataset_name else: self.channel_name = None def on_monitor(self, model, dataset, algorithm): """ Adjusts the learning rate based on the contents of model.monitor Parameters ---------- model : a Model instance dataset : Dataset algorithm : WRITEME """ model = algorithm.model lr = algorithm.learning_rate current_learning_rate = lr.get_value() assert hasattr(model, 'monitor'), ("no monitor associated with " + str(model)) monitor = model.monitor monitor_channel_specified = True if self.channel_name is None: monitor_channel_specified = False channels = [elem for elem in monitor.channels if elem.endswith("objective")] if len(channels) < 1: raise ValueError( "There are no monitoring channels that end " "with \"objective\". Please specify either " "channel_name or dataset_name.") elif len(channels) > 1: datasets = algorithm.monitoring_dataset.keys() raise ValueError( "There are multiple monitoring channels that" "end with \"_objective\". The list of available " "datasets are: " + str(datasets) + " . Please specify either " "channel_name or dataset_name in the " "MonitorBasedLRAdjuster constructor to " 'disambiguate.') else: self.channel_name = channels[0] warnings.warn('The channel that has been chosen for ' 'monitoring is: ' + str(self.channel_name) + '.') try: v = monitor.channels[self.channel_name].val_record except KeyError: err_input = '' if monitor_channel_specified: if self.dataset_name: err_input = 'The dataset_name \'' + str( self.dataset_name) + '\' is not valid.' else: err_input = 'The channel_name \'' + str( self.channel_name) + '\' is not valid.' err_message = 'There is no monitoring channel named \'' + \ str(self.channel_name) + '\'. You probably need to ' + \ 'specify a valid monitoring channel by using either ' + \ 'dataset_name or channel_name in the ' + \ 'MonitorBasedLRAdjuster constructor. ' + err_input reraise_as(ValueError(err_message)) if len(v) < 1: if monitor.dataset is None: assert len(v) == 0 raise ValueError( "You're trying to use a monitor-based " "learning rate adjustor but the monitor has no " "entries because you didn't specify a " "monitoring dataset.") raise ValueError( "For some reason there are no monitor entries" "yet the MonitorBasedLRAdjuster has been " "called. This should never happen. The Train" " object should call the monitor once on " "initialization, then call the callbacks. 
" "It seems you are either calling the " "callback manually rather than as part of a " "training algorithm, or there is a problem " "with the Train object.") if len(v) == 1: # only the initial monitoring has happened # no learning has happened, so we can't adjust learning rate yet # just do nothing return rval = current_learning_rate log.info("monitoring channel is {0}".format(self.channel_name)) if v[-1] > self.high_trigger * v[-2]: rval *= self.shrink_amt log.info("shrinking learning rate to %f" % rval) elif v[-1] > self.low_trigger * v[-2]: rval *= self.grow_amt log.info("growing learning rate to %f" % rval) rval = max(self.min_lr, rval) rval = min(self.max_lr, rval) lr.set_value(np.cast[lr.dtype](rval)) class PatienceBasedTermCrit(object): """ A monitor-based termination criterion using a geometrically increasing amount of patience. If the selected channel has decreased by a certain proportion when comparing to the lowest value seen yet, the patience is set to a factor of the number of examples seen, which by default (patience_increase=2.) ensures the model has seen as many examples as the number of examples that lead to the lowest value before concluding a local optima has been reached. Note: Technically, the patience corresponds to a number of epochs to be independent of the size of the dataset, so be aware of that when choosing initial_patience. Parameters ---------- prop_decrease : float The factor X in the (1 - X) * best_value threshold initial_patience : int Minimal number of epochs the model has to run before it can stop patience_increase : float, optional The factor X in the patience = X * n_iter update. channel_name : string, optional Name of the channel to examine. If None and the monitor has only one channel, this channel will be used; otherwise, an error will be raised. """ def __init__(self, prop_decrease, initial_patience, patience_increase=2., channel_name=None): self._channel_name = channel_name self.prop_decrease = prop_decrease self.patience = initial_patience self.best_value = np.inf self.patience_increase = patience_increase def __call__(self, model): """ Returns True or False depending on whether the optimization should stop or not. The optimization should stop if it has run for a number of epochs superior to the patience without any improvement. Parameters ---------- model : Model The model used in the experiment and from which the monitor used in the termination criterion will be extracted. Returns ------- bool True or False, indicating if the optimization should stop or not. """ monitor = model.monitor # In the case the monitor has only one channel, the channel_name can # be omitted and the criterion will examine the only channel # available. However, if the monitor has multiple channels, leaving # the channel_name unspecified will raise an error. if self._channel_name is None: if len(monitor.channels) != 1: raise ValueError("Only single-channel monitors are supported " "for channel_name == None") v = monitor.channels.values()[0].val_record else: v = monitor.channels[self._channel_name].val_record # If the channel value decrease is higher than the threshold, we # update the best value to this value and we update the patience. if v[-1] < self.best_value * (1. - self.prop_decrease): # Using the max between actual patience and updated patience # ensures that the model will run for at least the initial # patience and that it would behave correctly if the user # chooses a dumb value (i.e. 
less than 1) self.patience = max(self.patience, len(v) * self.patience_increase) self.best_value = v[-1] return len(v) < self.patience class AnnealedLearningRate(object): """ This is a callback for the SGD algorithm rather than the Train object. This anneals the learning rate to decrease as 1/t where t is the number of gradient descent updates done so far. Use OneOverEpoch as Train object callback if you would prefer 1/t where t is epochs. Parameters ---------- anneal_start : int The epoch on which to begin annealing """ def __init__(self, anneal_start): self._initialized = False self._count = 0 self._anneal_start = anneal_start def __call__(self, algorithm): """ Updates the learning rate according to the annealing schedule. Parameters ---------- algorithm : WRITEME """ if not self._initialized: self._base = algorithm.learning_rate.get_value() self._initialized = True self._count += 1 algorithm.learning_rate.set_value(np.cast[config.floatX]( self.current_learning_rate())) def current_learning_rate(self): """ Returns the current desired learning rate according to the annealing schedule. """ return self._base * min(1, self._anneal_start / self._count) class ExponentialDecay(object): """ This is a callback for the `SGD` algorithm rather than the `Train` object. This anneals the learning rate by dividing by decay_factor after each gradient descent step. It will not shrink the learning rate beyond `min_lr`. Parameters ---------- decay_factor : float The learning rate at step t is given by `init_learning_rate / (decay_factor ** t)` min_lr : float The learning rate will be clipped to be at least this value """ def __init__(self, decay_factor, min_lr): if isinstance(decay_factor, str): decay_factor = float(decay_factor) if isinstance(min_lr, str): min_lr = float(min_lr) assert isinstance(decay_factor, float) assert isinstance(min_lr, float) self.__dict__.update(locals()) del self.self self._count = 0 self._min_reached = False def __call__(self, algorithm): """ Updates the learning rate according to the exponential decay schedule. Parameters ---------- algorithm : SGD The SGD instance whose `learning_rate` field should be modified. """ if self._count == 0: self._base_lr = algorithm.learning_rate.get_value() self._count += 1 if not self._min_reached: # If we keep on executing the exponentiation on each mini-batch, # we will eventually get an OverflowError. So make sure we # only do the computation until min_lr is reached. new_lr = self._base_lr / (self.decay_factor ** self._count) if new_lr <= self.min_lr: self._min_reached = True new_lr = self.min_lr else: new_lr = self.min_lr new_lr = np.cast[config.floatX](new_lr) algorithm.learning_rate.set_value(new_lr) class LinearDecay(object): """ This is a callback for the SGD algorithm rather than the Train object. This anneals the learning rate to decay_factor times of the initial value during time start till saturate. 
Parameters ---------- start : int The step at which to start decreasing the learning rate saturate : int The step at which to stop decreating the learning rate decay_factor : float `final learning rate = decay_factor * initial learning rate` """ def __init__(self, start, saturate, decay_factor): if isinstance(decay_factor, str): decay_factor = float(decay_factor) if isinstance(start, str): start = float(start) if isinstance(saturate, str): saturate = float(saturate) assert isinstance(decay_factor, float) assert isinstance(start, (py_integer_types, py_float_types)) assert isinstance(saturate, (py_integer_types, py_float_types)) assert saturate > start assert start > 0 self.__dict__.update(locals()) del self.self self._count = 0 def __call__(self, algorithm): """ Adjusts the learning rate according to the linear decay schedule Parameters ---------- algorithm : WRITEME """ if self._count == 0: self._base_lr = algorithm.learning_rate.get_value() self._step = ((self._base_lr - self._base_lr * self.decay_factor) / (self.saturate - self.start + 1)) self._count += 1 if self._count >= self.start: if self._count < self.saturate: new_lr = self._base_lr - self._step * (self._count - self.start + 1) else: new_lr = self._base_lr * self.decay_factor else: new_lr = self._base_lr assert new_lr > 0 new_lr = np.cast[config.floatX](new_lr) algorithm.learning_rate.set_value(new_lr) class EpochMonitor(object): """ This is a callback for the SGD algorithm rather than the Train object. It can log one-line progress summaries and/or full monitor updates at regular intervals within epochs, which can be useful for large datasets. Note that each monitor update increases the calculation time of the epoch. Parameters ---------- model : pylearn2 model instance The model being monitored tick_rate : int (optional) Log one-line updates every `tick_rate` batches monitor_rate : int (optional) Call full monitor updates within epochs every `monitor_rate` batches YAML usage ---------- model: &model !obj:pylearn2.models.mlp.MLP { ... }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { update_callbacks: [ !obj:pylearn2.training_algorithms.sgd.EpochMonitor { model: *model, tick_rate: 20, monitor_rate: 110 }], ... 
} """ def __init__(self, model, tick_rate=None, monitor_rate=None): self.model = model self.tick_rate = tick_rate self.monitor_rate = monitor_rate self.batches = 0 self.epoch = 1 def __call__(self, algorithm): if self.model.monitor.get_epochs_seen() == self.epoch: self.epoch += 1 self.batches = 0 else: self.batches += 1 if self.monitor_rate and self.batches and not ( self.batches % self.monitor_rate): self.model.monitor.__call__() elif self.tick_rate and not self.batches % self.tick_rate: log.info('Epoch {}: {} batches seen'.format( self.epoch, self.batches)) class OneOverEpoch(TrainExtension): """ Scales the learning rate like one over # epochs Parameters ---------- start : int The epoch on which to start shrinking the learning rate half_life : int, optional How many epochs after start it will take for the learning rate to lose half its value for the first time (to lose the next half of its value will take twice as long) min_lr : float, optional The minimum value the learning rate can take on """ def __init__(self, start, half_life=None, min_lr=1e-6): self.__dict__.update(locals()) del self.self self._initialized = False self._count = 0 assert start >= 0 if half_life is None: self.half_life = start + 1 else: assert half_life > 0 def on_monitor(self, model, dataset, algorithm): """ Adjusts the learning rate according to the decay schedule. Parameters ---------- model : a Model instance dataset : Dataset algorithm : WRITEME """ if not self._initialized: self._init_lr = algorithm.learning_rate.get_value() if self._init_lr < self.min_lr: raise ValueError("The initial learning rate is smaller than " + "the minimum allowed learning rate.") self._initialized = True self._count += 1 algorithm.learning_rate.set_value(np.cast[config.floatX]( self.current_lr())) def current_lr(self): """ Returns the learning rate currently desired by the decay schedule. """ if self._count < self.start: scale = 1 else: scale = float(self.half_life) / float(self._count - self.start + self.half_life) lr = self._init_lr * scale clipped = max(self.min_lr, lr) return clipped class LinearDecayOverEpoch(TrainExtension): """ Scales the learning rate linearly on each epochs Parameters ---------- start : int The epoch on which to start shrinking the learning rate saturate : int The epoch to saturate the shrinkage decay_factor : float The final value would be initial learning rate times decay_factor """ def __init__(self, start, saturate, decay_factor): self.__dict__.update(locals()) del self.self self._initialized = False self._count = 0 assert isinstance(decay_factor, float) assert isinstance(start, (py_integer_types, py_float_types)) assert isinstance(saturate, (py_integer_types, py_float_types)) assert saturate > start assert start >= 0 assert saturate >= start def setup(self, model, dataset, algorithm): """ Initializes the decay schedule based on epochs_seen. Parameters ---------- model : pylearn2.models.Model The model to which the training algorithm is applied. dataset : pylearn2.datasets.Dataset The dataset to which the model is applied. algorithm : pylearn2.training_algorithms.TrainingAlgorithm Describes how gradients should be updated. """ monitor = Monitor.get_monitor(model) self._count = monitor.get_epochs_seen() self._apply_learning_rate(algorithm) def on_monitor(self, model, dataset, algorithm): """ Updates the learning rate based on the linear decay schedule. 
Parameters ---------- model : a Model instance dataset : Dataset algorithm : WRITEME """ self._count += 1 self._apply_learning_rate(algorithm) def _apply_learning_rate(self, algorithm): """ Updates the learning rate on algorithm based on the epochs elapsed. """ if not self._initialized: self._init_lr = algorithm.learning_rate.get_value() self._step = ((self._init_lr - self._init_lr * self.decay_factor) / (self.saturate - self.start + 1)) self._initialized = True algorithm.learning_rate.set_value(np.cast[config.floatX]( self.current_lr())) def current_lr(self): """ Returns the learning rate currently desired by the decay schedule. """ if self._count >= self.start: if self._count < self.saturate: new_lr = self._init_lr - self._step * (self._count - self.start + 1) else: new_lr = self._init_lr * self.decay_factor else: new_lr = self._init_lr assert new_lr > 0 return new_lr class _PolyakWorker(object): """ Only to be used by the PolyakAveraging TrainingCallback below. Do not use directly. A callback for the SGD class. Parameters ---------- model : a Model The model whose parameters we want to train with Polyak averaging """ def __init__(self, model): avg_updates = OrderedDict() t = sharedX(1.) self.param_to_mean = OrderedDict() for param in model.get_params(): mean = sharedX(param.get_value()) assert type(mean) == type(param) self.param_to_mean[param] = mean avg_updates[mean] = mean - (mean - param) / t avg_updates[t] = t + 1. self.avg = function([], updates=avg_updates) def __call__(self, algorithm): """ To be called after each SGD step. Updates the Polyak averaged-parameters for this model Parameters ---------- algorithm : WRITEME """ self.avg() class PolyakAveraging(TrainExtension): """ See "A Tutorial on Stochastic Approximation Algorithms for Training Restricted Boltzmann Machines and Deep Belief Nets" by Kevin Swersky et al This functionality is still a work in progress. Currently, your model needs to implement "add_polyak_channels" to use it. The problem is that Polyak averaging shouldn't modify the model parameters. It should keep a second copy that it averages in the background. This second copy doesn't get to come back in and affect the learning process though. (IG tried having the second copy get pushed back into the model once per epoch, but this turned out to be harmful, at least in limited tests) So we need a cleaner interface for monitoring the averaged copy of the parameters, and we need to make sure the saved model at the end uses the averaged parameters, not the parameters used for computing the gradients during training. TODO: make use of the new on_save callback instead of duplicating Train's save_freq flag Parameters ---------- start : int The epoch after which to start averaging (0 = start averaging immediately) save_path : str, optional WRITEME save_freq : int, optional WRITEME Notes ----- This is usually used with a fixed, rather than annealed learning rate. It may be used in conjunction with momentum. """ def __init__(self, start, save_path=None, save_freq=1): self.__dict__.update(locals()) del self.self self._count = 0 assert isinstance(start, py_integer_types) assert start >= 0 def on_monitor(self, model, dataset, algorithm): """ Make sure Polyak-averaged model gets monitored. Save the model if necessary. 
Parameters ---------- model : a Model instance dataset : Dataset algorithm : WRITEME """ if self._count == self.start: self._worker = _PolyakWorker(model) algorithm.update_callbacks.append(self._worker) # HACK try: model.add_polyak_channels(self._worker.param_to_mean, algorithm.monitoring_dataset) except AttributeError: pass elif self.save_path is not None and self._count > self.start and \ self._count % self.save_freq == 0: saved_params = OrderedDict() for param in model.get_params(): saved_params[param] = param.get_value() param.set_value(self._worker.param_to_mean[param].get_value()) serial.save(self.save_path, model) for param in model.get_params(): param.set_value(saved_params[param]) self._count += 1
bsd-3-clause
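The decay callbacks in sgd.py above (ExponentialDecay, LinearDecay, OneOverEpoch) are each defined by a short closed-form schedule. A NumPy sketch that evaluates those formulas directly for hypothetical settings; the min_lr floor is shown only for the exponential schedule, and the values are illustrative rather than taken from any experiment:

# Sketch: the learning-rate schedules implemented by ExponentialDecay,
# LinearDecay and OneOverEpoch, evaluated directly as functions of the step.
import numpy as np

base_lr, min_lr = 0.1, 1e-4
steps = np.arange(1, 11)

# ExponentialDecay: lr_t = base / decay_factor**t, never below min_lr
exp_lr = np.maximum(base_lr / (1.05 ** steps), min_lr)

# LinearDecay: interpolate from base down to decay_factor * base between
# `start` and `saturate`, then stay flat (clip reproduces the piecewise ends)
start, saturate, decay_factor = 2, 8, 0.1
step_size = (base_lr - base_lr * decay_factor) / (saturate - start + 1)
lin_lr = np.clip(base_lr - step_size * (steps - start + 1),
                 base_lr * decay_factor, base_lr)

# OneOverEpoch: scale by half_life / (t - start + half_life) once past `start`
half_life = 3
one_over = np.where(steps < start, base_lr,
                    base_lr * half_life / (steps - start + half_life))

print(exp_lr)
print(lin_lr)
print(one_over)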