"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy import optimize
from scipy.sparse import linalg as sp_linalg
from ._base import LinearClassifierMixin, LinearModel
from ._base import _deprecate_normalize, _rescale_data
from ._sag import sag_solver
from ..base import MultiOutputMixin, RegressorMixin, is_classifier
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..utils.validation import _check_sample_weight
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics import check_scoring
from ..exceptions import ConvergenceWarning
from ..utils.sparsefuncs import mean_variance_axis
def _solve_sparse_cg(
X, y, alpha, max_iter=None, tol=1e-3, verbose=0, X_offset=None, X_scale=None
):
def _get_rescaled_operator(X):
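        # A descriptive note on what this helper does: it emulates the
        # centered matrix X - np.outer(ones, X_offset / X_scale) as a
        # LinearOperator, so that sparse X is never densified.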
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
return X1
n_samples, n_features = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X1 = _get_rescaled_operator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol="legacy")
except TypeError:
# old scipy
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coefs[i], info = sp_linalg.cg(
C, y_column, maxiter=max_iter, tol=tol, atol="legacy"
)
except TypeError:
# old scipy
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn(
"sparse_cg did not converge after %d iterations." % info,
ConvergenceWarning,
)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(
X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
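    # If every target shares the same penalty, a single factorization of A
    # solves all targets at once; otherwise A is updated in place per target.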
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[:: n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[:: n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel()
A.flat[:: n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
        # Only one penalty: we can solve the multi-target problem at once.
K.flat[:: n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn(
"Singular matrix in solving dual problem. Using "
"least-squares solution instead."
)
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[:: n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[:: n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(
K, target, sym_pos=True, overwrite_a=False
).ravel()
K.flat[:: n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
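    # Closed-form solution via the thin SVD X = U diag(s) Vt:
    # w = V diag(s / (s**2 + alpha)) U.T y, with one column of d per target
    # (each target may have its own alpha). Singular values below the cutoff
    # are treated as exactly zero.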
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _solve_lbfgs(
X, y, alpha, positive=True, max_iter=None, tol=1e-3, X_offset=None, X_scale=None
):
"""Solve ridge regression with LBFGS.
    Its main purpose is fitting with the coefficients forced to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
"""
n_samples, n_features = X.shape
options = {}
if max_iter is not None:
options["maxiter"] = max_iter
config = {
"method": "L-BFGS-B",
"tol": tol,
"jac": True,
"options": options,
}
if positive:
config["bounds"] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
for i in range(y.shape[1]):
x0 = np.zeros((n_features,))
y_column = y[:, i]
def func(w):
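            # Ridge objective 0.5 * ||X w - y||^2 + 0.5 * alpha[i] * ||w||^2
            # and its gradient; when X_offset_scale is given, X is implicitly
            # centered so that sparse inputs are never densified.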
residual = X.dot(w) - y_column
if X_offset_scale is not None:
residual -= w.dot(X_offset_scale)
f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
grad = X.T @ residual + alpha[i] * w
if X_offset_scale is not None:
grad -= X_offset_scale * np.sum(residual)
return f, grad
result = optimize.minimize(func, x0, **config)
if not result["success"]:
warnings.warn(
"The lbfgs solver did not converge. Try increasing max_iter "
f"or tol. Currently: max_iter={max_iter} and tol={tol}",
ConvergenceWarning,
)
coefs[i] = result["x"]
return coefs
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ["auto", "sag", "saga"]:
return "csr"
else:
return ["csr", "csc", "coo"]
def ridge_regression(
X,
y,
alpha,
*,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
check_input=True,
):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {ndarray, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
        Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        Target values.
alpha : float or array-like of shape (n_targets,)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X).
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
        All solvers except 'svd' support both dense and sparse data. However,
        only 'sparse_cg', 'sag', and 'lbfgs' support sparse input when
        `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default
        value is 1000. For the 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
        If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
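
    Examples
    --------
    A minimal sketch on synthetic data (checking only the output shape, since
    the exact coefficients depend on the random draw):

    >>> import numpy as np
    >>> from sklearn.linear_model import ridge_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 4)
    >>> y = X @ np.array([1.0, 2.0, -1.0, 0.5]) + 0.1 * rng.randn(100)
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> coef.shape
    (4,)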
"""
return _ridge_regression(
X,
y,
alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
positive=positive,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input,
)
def _ridge_regression(
X,
y,
alpha,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
X_scale=None,
X_offset=None,
check_input=True,
):
has_sw = sample_weight is not None
if solver == "auto":
if positive:
solver = "lbfgs"
elif return_intercept:
# sag supports fitting intercept directly
solver = "sag"
elif not sparse.issparse(X):
solver = "cholesky"
else:
solver = "sparse_cg"
if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"):
raise ValueError(
"Known solvers are 'sparse_cg', 'cholesky', 'svd'"
" 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver
)
if positive and solver != "lbfgs":
raise ValueError(
"When positive=True, only 'lbfgs' solver can be used. "
f"Please change solver {solver} to 'lbfgs' "
"or set positive=False."
)
if solver == "lbfgs" and not positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if return_intercept and solver != "sag":
raise ValueError(
"In Ridge, only 'sag' solver can directly fit the "
"intercept. Please change solver to 'sag' or set "
"return_intercept=False."
)
if check_input:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C")
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError(
"Number of samples in X and y does not correspond: %d != %d"
% (n_samples, n_samples_)
)
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ["sag", "saga"]:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError(
"Number of targets and number of penalties do not correspond: %d != %d"
% (alpha.size, n_targets)
)
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == "sparse_cg":
coef = _solve_sparse_cg(
X,
y,
alpha,
max_iter=max_iter,
tol=tol,
verbose=verbose,
X_offset=X_offset,
X_scale=X_scale,
)
elif solver == "lsqr":
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == "cholesky":
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
elif solver in ["sag", "saga"]:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
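            # warm-start vector for sag_solver; one extra row holds the
            # intercept when return_intercept is True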
init = {
"coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)
}
coef_, n_iter_, _ = sag_solver(
X,
target.ravel(),
sample_weight,
"squared",
alpha_i,
0,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
init,
is_saga=solver == "saga",
)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == "lbfgs":
coef = _solve_lbfgs(
X,
y,
alpha,
positive=positive,
tol=tol,
max_iter=max_iter,
X_offset=X_offset,
X_scale=X_scale,
)
if solver == "svd":
if sparse.issparse(X):
raise TypeError("SVD solver does not support sparse inputs currently")
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.positive = positive
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
self._normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
X, y = self._validate_data(
X,
y,
accept_sparse=_accept_sparse,
dtype=_dtype,
multi_output=True,
y_numeric=True,
)
if self.solver == "lbfgs" and not self.positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if self.positive:
if self.solver not in ["auto", "lbfgs"]:
raise ValueError(
f"solver='{self.solver}' does not support positive fitting. Please"
" set the solver to 'auto' or 'lbfgs', or set `positive=False`"
)
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ["auto", "sparse_cg", "sag", "lbfgs"]:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'sparse_cg', 'sag', 'lbfgs' "
"or set `fit_intercept=False`".format(self.solver)
)
if self.solver == "lbfgs":
solver = "lbfgs"
elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
warnings.warn(
'"sag" solver requires many iterations to fit '
"an intercept with sparse inputs. Either set the "
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
"not standardized)."
)
solver = "sag"
else:
solver = "sparse_cg"
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X,
y,
self.fit_intercept,
self._normalize,
self.copy_X,
sample_weight=sample_weight,
return_mean=True,
)
if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver="sag",
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=True,
check_input=False,
)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg solver
params = {"X_offset": X_offset, "X_scale": X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=solver,
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=False,
check_input=False,
**params,
)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
"""Linear least squares with l2 regularization.
Minimizes the objective function::
||y - Xw||^2_2 + alpha * ||w||^2_2
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape (n_samples, n_targets)).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, ndarray of shape (n_targets,)}, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
fit_intercept : bool, default=True
Whether to fit the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. ``X`` and ``y`` are expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
        All solvers except 'svd' support both dense and sparse data. However,
        only 'sparse_cg', 'sag', and 'lbfgs' support sparse input when
        `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.17
`random_state` to support Stochastic Average Gradient.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
:class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
combines ridge regression with the kernel trick.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge()
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-3
Precision of the solution.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
        The class labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
class_weight=None,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : object
Instance of the estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
X, y = self._validate_data(
X, y, accept_sparse=_accept_sparse, multi_output=True, y_numeric=False
)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification"
% (self.__class__.__name__)
)
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
super().fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
"""Classes labels."""
return self._label_binarizer.classes_
def _check_gcv_mode(X, gcv_mode):
possible_gcv_modes = [None, "auto", "svd", "eigen"]
if gcv_mode not in possible_gcv_modes:
raise ValueError(
"Unknown value for 'gcv_mode'. Got {} instead of one of {}".format(
gcv_mode, possible_gcv_modes
)
)
if gcv_mode in ["eigen", "svd"]:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return "svd"
return "eigen"
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
class _X_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as centered and scaled X with an added intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_samples, n_features + 1))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw
)
def _matmat(self, v):
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw[:, None]
)
def _transpose(self):
return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
class _XT_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as transposed centered and scaled X with an intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_features + 1, n_samples))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
n_features = self.shape[0]
res = np.empty(n_features, dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - (
self.X_mean * self.sqrt_sw.dot(v)
)
res[-1] = np.dot(v, self.sqrt_sw)
return res
def _matmat(self, v):
n_features = self.shape[0]
res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[
:, None
] * self.sqrt_sw.dot(v)
res[-1] = np.dot(self.sqrt_sw, v)
return res
class _IdentityRegressor:
"""Fake regressor which will directly output the prediction."""
def decision_function(self, y_predict):
return y_predict
def predict(self, y_predict):
return y_predict
class _IdentityClassifier(LinearClassifierMixin):
"""Fake classifier which will directly output the prediction.
We inherit from LinearClassifierMixin to get the proper shape for the
output `y`.
"""
def __init__(self, classes):
self.classes_ = classes
def decision_function(self, y_predict):
return y_predict
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Leave-one-out Cross-Validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id).
    Dual solution: c = G^-1 y
    Primal solution: w = X^T c
    Compute eigendecomposition K = Q V Q^T.
    Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.
    loov = (K G^-1 y - diag(K G^-1) y) / diag(Id - K G^-1)
    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.
    looe = y - loov = c / diag(G^-1)
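    (The last identity follows because y - loov = (y - K c) / diag(Id - K G^-1),
    where K c = (G - alpha*Id) c = y - alpha*c makes the numerator alpha*c, and
    diag(Id - K G^-1) = alpha * diag(G^-1), so the factors alpha cancel.)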
The best score (negative mean squared error or user-provided scoring) is
stored in the `best_score_` attribute, and the selected hyperparameter in
`alpha_`.
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
copy_X=True,
gcv_mode=None,
store_cv_values=False,
is_clf=False,
alpha_per_target=False,
):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.is_clf = is_clf
self.alpha_per_target = alpha_per_target
@staticmethod
def _decomp_diag(v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
@staticmethod
def _diag_dot(D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)]
return D * B
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
        X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
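        # Expand the Gram matrix of the (implicitly) centered X, with
        # m = X_mean, so that sparse X is never densified:
        # (X - sqrt_sw m^T)(X - sqrt_sw m^T)^T
        #   = X X^T - X_mX - X_mX.T + (m . m) * outer(sqrt_sw, sqrt_sw)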
X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (
safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
X_mean,
)
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
        X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (
safe_sparse_dot(X.T, X, dense_output=True)
- weight_sum * np.outer(X_mean, X_mean),
X_mean,
)
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
        without explicitly centering X or computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
        sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
        diag : ndarray of shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].A
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
def _eigen_decompose_gram(self, X, y, sqrt_sw):
"""Eigendecomposition of X.X^T, used when n_samples <= n_features."""
# if X is dense it has already been centered in preprocessing
K, X_mean = self._compute_gram(X, sqrt_sw)
if self.fit_intercept:
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
K += np.outer(sqrt_sw, sqrt_sw)
eigvals, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return X_mean, eigvals, Q, QT_y
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1.0 / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
# when all samples have the same weight we add a column of 1
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
        # sum(sample_weight), i.e. n_samples when sample weights are uniform.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
# X already centered
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
if self.fit_intercept:
# to emulate fit_intercept=True situation, add a column
# containing the square roots of the sample weights
# by centering, the other columns are orthogonal to that one
intercept_column = sqrt_sw[:, None]
X = np.hstack((X, intercept_column))
        U, singvals, _ = linalg.svd(X, full_matrices=False)
singvals_sq = singvals ** 2
UT_y = np.dot(U.T, y)
return X_mean, singvals_sq, U, UT_y
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha ** -1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = -(alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
_normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64],
multi_output=True,
y_numeric=True,
)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if np.any(self.alphas <= 0):
raise ValueError(
"alphas must be strictly positive. Got {} containing some "
"negative or null value instead.".format(self.alphas)
)
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X,
y,
self.fit_intercept,
_normalize,
self.copy_X,
sample_weight=sample_weight,
)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == "eigen":
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == "svd":
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
sqrt_sw = np.sqrt(sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_values:
self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition)
if error:
squared_errors = (c / G_inverse_diag) ** 2
if self.alpha_per_target:
alpha_score = -squared_errors.mean(axis=0)
else:
alpha_score = -squared_errors.mean()
if self.store_cv_values:
self.cv_values_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
if self.store_cv_values:
self.cv_values_[:, i] = predictions.ravel()
if self.is_clf:
identity_estimator = _IdentityClassifier(classes=np.arange(n_y))
alpha_score = scorer(
identity_estimator, predictions, y.argmax(axis=1)
)
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
alpha_score = np.array(
[
scorer(identity_estimator, predictions[:, j], y[:, j])
for j in range(n_y)
]
)
else:
alpha_score = scorer(
identity_estimator, predictions.ravel(), y.ravel()
)
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, n_alphas
else:
cv_values_shape = n_samples, n_y, n_alphas
self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=False,
alpha_per_target=False,
):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
cv = self.cv
if cv is None:
estimator = _RidgeGCV(
self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target,
)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True are incompatible")
parameters = {"alpha": self.alphas}
solver = "sparse_cg" if sparse.issparse(X) else "auto"
model = RidgeClassifier if is_classifier(self) else Ridge
gs = GridSearchCV(
model(
fit_intercept=self.fit_intercept,
normalize=self.normalize,
solver=solver,
),
parameters,
cv=cv,
scoring=self.scoring,
)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.best_score_ = gs.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
if hasattr(estimator, "feature_names_in_"):
self.feature_names_in_ = estimator.feature_names_in_
return self
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
"""Ridge regression with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs efficient Leave-One-Out Cross-Validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If None, the negative mean squared error is used if cv is 'auto' or
None (i.e. when using leave-one-out cross-validation), and r2 score
otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
Flag indicating which strategy to use when performing
Leave-One-Out Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
'svd' : force use of singular value decomposition of X when X is
dense, eigenvalue decomposition of X^T.X when X is sparse.
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending on the shape of the training data.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
alpha_per_target : bool, default=False
Flag indicating whether to optimize the alpha value (picked from the
`alphas` parameter list) for each target separately (for multi-output
settings: multiple prediction targets). When set to `True`, after
fitting, the `alpha_` attribute will contain a value for each target.
When set to `False`, a single alpha is used for all targets.
.. versionadded:: 0.24
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_alphas) or \
shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only available if
``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
called, this attribute will contain the mean squared errors if
`scoring is None`, otherwise it will contain standardized per-point
prediction values.
coef_ : ndarray of shape (n_features) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float or ndarray of shape (n_targets,)
Estimated regularization parameter, or, if ``alpha_per_target=True``,
the estimated regularization parameter for each target.
best_score_ : float or ndarray of shape (n_targets,)
Score of base estimator with best alpha, or, if
``alpha_per_target=True``, a score for each target.
.. versionadded:: 0.23
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> X, y = load_diabetes(return_X_y=True)
>>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.5166...
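As a quick sanity check (illustrative, using the ``clf`` fitted above),
the selected regularization strength is always one of the supplied
candidates:
>>> clf.alpha_ in [1e-3, 1e-2, 1e-1, 1]
True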
"""
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors if `scoring is None`, otherwise it
will contain standardized per-point prediction values.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The class labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
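The ``class_weight='balanced'`` heuristic described above can be
reproduced by hand (``y_demo`` is an illustrative label vector, not
part of the dataset used above):
>>> import numpy as np
>>> y_demo = np.array([0, 0, 0, 1])
>>> len(y_demo) / (2 * np.bincount(y_demo))
array([0.66666667, 2.        ])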
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
class_weight=None,
store_cv_values=False,
):
super().__init__(
alphas=alphas,
fit_intercept=fit_intercept,
normalize=normalize,
scoring=scoring,
cv=cv,
store_cv_values=store_cv_values,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc", "coo"],
multi_output=True,
y_numeric=False,
)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
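# With cv=None the GCV path can exploit the multi-output {-1, 1}
# indicator matrix directly; a user-supplied cv goes through
# GridSearchCV on a RidgeClassifier, which binarizes y itself.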
target = Y if self.cv is None else y
_BaseRidgeCV.fit(self, X, target, sample_weight=sample_weight)
return self
@property
def classes_(self):
"""Classes labels."""
return self._label_binarizer.classes_
def _more_tags(self):
return {
"multilabel": True,
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
# FIXME: see
# https://github.com/scikit-learn/scikit-learn/issues/19858
# to track progress to resolve this issue
"check_classifiers_multilabel_output_format_predict": (
"RidgeClassifierCV.predict outputs an array of shape (25,) "
"instead of (25, 5)"
),
},
}
| bsd-3-clause |
Gustry/QGIS | tests/src/python/test_provider_postgres.py | 2 | 149310 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the postgres provider.
Note: to prepare the DB, you need to run the sql files specified in
tests/testdata/provider/testdata_pg.sh
Read tests/README.md about writing/launching tests with PostgreSQL.
Run with ctest -V -R PyQgsPostgresProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
__author__ = 'Matthias Kuhn'
__date__ = '2015-04-23'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import psycopg2
import os
import time
from datetime import datetime
from qgis.core import (
QgsVectorLayer,
QgsVectorLayerExporter,
QgsFeatureRequest,
QgsFeatureSource,
QgsFeature,
QgsFieldConstraints,
QgsDataProvider,
NULL,
QgsVectorLayerUtils,
QgsSettings,
QgsTransactionGroup,
QgsReadWriteContext,
QgsRectangle,
QgsDefaultValue,
QgsCoordinateReferenceSystem,
QgsProject,
QgsWkbTypes,
QgsGeometry,
QgsProviderRegistry,
QgsVectorDataProvider,
QgsDataSourceUri,
QgsProviderConnectionException,
)
from qgis.gui import QgsGui, QgsAttributeForm
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir, QObject, QByteArray, QTemporaryDir
from qgis.PyQt.QtWidgets import QLabel
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
from utilities import unitTestDataPath, compareWkt
from providertestbase import ProviderTestCase
QGISAPP = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsPostgresProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layers
cls.vl = QgsVectorLayer(
cls.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."someData" (geom) sql=',
'test', 'postgres')
assert cls.vl.isValid()
cls.source = cls.vl.dataProvider()
cls.poly_vl = QgsVectorLayer(
cls.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
assert cls.poly_vl.isValid()
cls.poly_provider = cls.poly_vl.dataProvider()
QgsGui.editorWidgetRegistry().initEditors()
cls.con = psycopg2.connect(cls.dbconn)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def execSQLCommand(self, sql):
self.assertTrue(self.con)
cur = self.con.cursor()
self.assertTrue(cur)
cur.execute(sql)
cur.close()
self.con.commit()
def getSource(self):
# create temporary table for edit tests
self.execSQLCommand(
'DROP TABLE IF EXISTS qgis_test."editData" CASCADE')
self.execSQLCommand(
'CREATE TABLE qgis_test."editData" ( pk SERIAL NOT NULL PRIMARY KEY, cnt integer, name text, name2 text, num_char text, dt timestamp without time zone, "date" date, "time" time without time zone, geom public.geometry(Point, 4326))')
self.execSQLCommand("INSERT INTO qgis_test.\"editData\" (pk, cnt, name, name2, num_char, dt, \"date\", \"time\", geom) VALUES "
"(5, -200, NULL, 'NuLl', '5', TIMESTAMP '2020-05-04 12:13:14', '2020-05-02', '12:13:01', '0101000020E61000001D5A643BDFC751C01F85EB51B88E5340'),"
"(3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL, NULL),"
"(1, 100, 'Orange', 'oranGe', '1', TIMESTAMP '2020-05-03 12:13:14', '2020-05-03', '12:13:14', '0101000020E61000006891ED7C3F9551C085EB51B81E955040'),"
"(2, 200, 'Apple', 'Apple', '2', TIMESTAMP '2020-05-04 12:14:14', '2020-05-04', '12:14:14', '0101000020E6100000CDCCCCCCCC0C51C03333333333B35140'),"
"(4, 400, 'Honey', 'Honey', '4', TIMESTAMP '2021-05-04 13:13:14', '2021-05-04', '13:13:14', '0101000020E610000014AE47E17A5450C03333333333935340')")
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."editData" (geom) sql=',
'test', 'postgres')
return vl
def getEditableLayer(self):
return self.getSource()
def getEditableLayerWithCheckConstraint(self):
"""Returns the layer for attribute change CHECK constraint violation"""
return QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'id\' srid=4326 type=POINT table="public"."test_check_constraint" (geom) sql=', 'test_check_constraint', 'postgres')
def enableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', True)
return True
def disableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', False)
def uncompiledFilters(self):
return set(['"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')',
'"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')',
'"time" = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')'])
def partiallyCompiledFilters(self):
return set([])
def getGeneratedColumnsData(self):
"""
return a tuple with the generated column test layer and the expected generated value
"""
cur = self.con.cursor()
cur.execute("SHOW server_version_num")
pgversion = int(cur.fetchone()[0])
# skip this test on PostgreSQL versions earlier than 12.
if pgversion < 120000:
return (None, None)
else:
return (QgsVectorLayer(self.dbconn + ' sslmode=disable table="qgis_test"."generated_columns"', 'test', 'postgres'),
"""('test:'::text || ((pk)::character varying)::text)""")
# HERE GO THE PROVIDER SPECIFIC TESTS
def testDefaultValue(self):
self.source.setProviderProperty(
QgsDataProvider.EvaluateDefaultValues, True)
self.assertIsInstance(self.source.defaultValue(0), int)
self.assertEqual(self.source.defaultValue(1), NULL)
self.assertEqual(self.source.defaultValue(2), 'qgis')
self.source.setProviderProperty(
QgsDataProvider.EvaluateDefaultValues, False)
def testDefaultValueClause(self):
self.source.setProviderProperty(
QgsDataProvider.EvaluateDefaultValues, False)
self.assertEqual(self.source.defaultValueClause(
0), 'nextval(\'qgis_test."someData_pk_seq"\'::regclass)')
self.assertFalse(self.source.defaultValueClause(1))
self.assertEqual(self.source.defaultValueClause(2), '\'qgis\'::text')
def testDateTimeTypes(self):
vl = QgsVectorLayer('%s table="qgis_test"."date_times" sql=' % (
self.dbconn), "testdatetimes", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName(
'date_field')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName(
'time_field')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName(
'datetime_field')).type(), QVariant.DateTime)
f = next(vl.getFeatures(QgsFeatureRequest()))
date_idx = vl.fields().lookupField('date_field')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2004, 3, 4))
time_idx = vl.fields().lookupField('time_field')
self.assertIsInstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 41, 52))
datetime_idx = vl.fields().lookupField('datetime_field')
self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(
QDate(2004, 3, 4), QTime(13, 41, 52)))
def testBooleanType(self):
vl = QgsVectorLayer('{} table="qgis_test"."boolean_table" sql='.format(
self.dbconn), "testbool", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(
fields.at(fields.indexFromName('fld1')).type(), QVariant.Bool)
values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
expected = {
1: True,
2: False,
3: NULL
}
self.assertEqual(values, expected)
def testByteaType(self):
vl = QgsVectorLayer('{} table="qgis_test"."byte_a_table" sql='.format(
self.dbconn), "testbytea", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName(
'fld1')).type(), QVariant.ByteArray)
values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
expected = {
1: QByteArray(b'YmludmFsdWU='),
2: QByteArray()
}
self.assertEqual(values, expected)
# editing binary values
self.execSQLCommand(
'DROP TABLE IF EXISTS qgis_test."byte_a_table_edit" CASCADE')
self.execSQLCommand(
'CREATE TABLE qgis_test."byte_a_table_edit" ( pk SERIAL NOT NULL PRIMARY KEY, blobby bytea)')
self.execSQLCommand("INSERT INTO qgis_test.\"byte_a_table_edit\" (pk, blobby) VALUES "
"(1, encode('bbb', 'base64')::bytea)")
vl = QgsVectorLayer(
self.dbconn + ' sslmode=disable table="qgis_test"."byte_a_table_edit" sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
values = {feat['pk']: feat['blobby'] for feat in vl.getFeatures()}
expected = {
1: QByteArray(b'YmJi')
}
self.assertEqual(values, expected)
# change attribute value
self.assertTrue(vl.dataProvider().changeAttributeValues(
{1: {1: QByteArray(b'bbbvx')}}))
values = {feat['pk']: feat['blobby'] for feat in vl.getFeatures()}
expected = {
1: QByteArray(b'bbbvx')
}
self.assertEqual(values, expected)
# add feature
f = QgsFeature()
f.setAttributes([2, QByteArray(b'cccc')])
self.assertTrue(vl.dataProvider().addFeature(f))
values = {feat['pk']: feat['blobby'] for feat in vl.getFeatures()}
expected = {
1: QByteArray(b'bbbvx'),
2: QByteArray(b'cccc')
}
self.assertEqual(values, expected)
# change feature
self.assertTrue(vl.dataProvider().changeFeatures(
{2: {1: QByteArray(b'dddd')}}, {}))
values = {feat['pk']: feat['blobby'] for feat in vl.getFeatures()}
expected = {
1: QByteArray(b'bbbvx'),
2: QByteArray(b'dddd')
}
self.assertEqual(values, expected)
def testCitextType(self):
vl = QgsVectorLayer('{} table="qgis_test"."citext_table" sql='.format(
self.dbconn), "testbytea", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(
fields.at(fields.indexFromName('fld1')).type(), QVariant.String)
values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
expected = {
1: 'test val',
2: NULL
}
self.assertEqual(values, expected)
# editing citext values
self.execSQLCommand(
'DROP TABLE IF EXISTS qgis_test."citext_table_edit" CASCADE')
self.execSQLCommand(
'CREATE TABLE qgis_test."citext_table_edit" ( pk SERIAL NOT NULL PRIMARY KEY, txt citext)')
self.execSQLCommand("INSERT INTO qgis_test.\"citext_table_edit\" (pk, txt) VALUES "
"(1, 'text')")
vl = QgsVectorLayer(
self.dbconn + ' sslmode=disable table="qgis_test"."citext_table_edit" sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
values = {feat['pk']: feat['txt'] for feat in vl.getFeatures()}
expected = {
1: 'text'
}
self.assertEqual(values, expected)
# change attribute value
self.assertTrue(
vl.dataProvider().changeAttributeValues({1: {1: 'teeeext'}}))
values = {feat['pk']: feat['txt'] for feat in vl.getFeatures()}
expected = {
1: 'teeeext'
}
self.assertEqual(values, expected)
# add feature
f = QgsFeature()
f.setAttributes([2, 'teeeeeeeeeext'])
self.assertTrue(vl.dataProvider().addFeature(f))
values = {feat['pk']: feat['txt'] for feat in vl.getFeatures()}
expected = {
1: 'teeeext',
2: 'teeeeeeeeeext'
}
self.assertEqual(values, expected)
# change feature
self.assertTrue(vl.dataProvider().changeFeatures(
{2: {1: 'teeeeeeeeeeeeeeeeeeeeeeext'}}, {}))
values = {feat['pk']: feat['txt'] for feat in vl.getFeatures()}
expected = {
1: 'teeeext',
2: 'teeeeeeeeeeeeeeeeeeeeeeext'
}
self.assertEqual(values, expected)
def testQueryLayers(self):
def test_query(dbconn, query, key):
ql = QgsVectorLayer(
'%s srid=4326 table="%s" (geom) key=\'%s\' sql=' % (
dbconn, query.replace('"', '\\"'), key), "testgeom",
"postgres")
self.assertTrue(ql.isValid(), '{} ({})'.format(query, key))
test_query(self.dbconn,
'(SELECT NULL::integer "Id1", NULL::integer "Id2", NULL::geometry(Point, 4326) geom LIMIT 0)',
'"Id1","Id2"')
def testWkbTypes(self):
def test_table(dbconn, table_name, wkt):
vl = QgsVectorLayer('%s srid=4326 table="qgis_test".%s (geom) sql=' % (dbconn, table_name), "testgeom",
"postgres")
self.assertTrue(vl.isValid())
for f in vl.getFeatures():
self.assertEqual(f.geometry().asWkt(), wkt)
test_table(self.dbconn, 'p2d', 'Polygon ((0 0, 1 0, 1 1, 0 1, 0 0))')
test_table(self.dbconn, 'p3d',
'PolygonZ ((0 0 0, 1 0 0, 1 1 0, 0 1 0, 0 0 0))')
test_table(self.dbconn, 'triangle2d', 'Polygon ((0 0, 1 0, 1 1, 0 0))')
test_table(self.dbconn, 'triangle3d',
'PolygonZ ((0 0 0, 1 0 0, 1 1 0, 0 0 0))')
test_table(self.dbconn, 'tin2d',
'MultiPolygon (((0 0, 1 0, 1 1, 0 0)),((0 0, 0 1, 1 1, 0 0)))')
test_table(self.dbconn, 'tin3d',
'MultiPolygonZ (((0 0 0, 1 0 0, 1 1 0, 0 0 0)),((0 0 0, 0 1 0, 1 1 0, 0 0 0)))')
test_table(self.dbconn, 'ps2d',
'MultiPolygon (((0 0, 1 0, 1 1, 0 1, 0 0)))')
test_table(self.dbconn, 'ps3d',
'MultiPolygonZ (((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),((0 0 1, 1 0 1, 1 1 1, 0 1 1, 0 0 1)),((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),((0 1 0, 0 1 1, 1 1 1, 1 1 0, 0 1 0)),((1 1 0, 1 1 1, 1 0 1, 1 0 0, 1 1 0)),((1 0 0, 1 0 1, 0 0 1, 0 0 0, 1 0 0)))')
test_table(self.dbconn, 'mp3d',
'MultiPolygonZ (((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),((0 0 1, 1 0 1, 1 1 1, 0 1 1, 0 0 1)),((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),((0 1 0, 0 1 1, 1 1 1, 1 1 0, 0 1 0)),((1 1 0, 1 1 1, 1 0 1, 1 0 0, 1 1 0)),((1 0 0, 1 0 1, 0 0 1, 0 0 0, 1 0 0)))')
test_table(self.dbconn, 'pt2d', 'Point (0 0)')
test_table(self.dbconn, 'pt3d', 'PointZ (0 0 0)')
test_table(self.dbconn, 'ls2d', 'LineString (0 0, 1 1)')
test_table(self.dbconn, 'ls3d', 'LineStringZ (0 0 0, 1 1 1)')
test_table(self.dbconn, 'mpt2d', 'MultiPoint ((0 0),(1 1))')
test_table(self.dbconn, 'mpt3d', 'MultiPointZ ((0 0 0),(1 1 1))')
test_table(self.dbconn, 'mls2d',
'MultiLineString ((0 0, 1 1),(2 2, 3 3))')
test_table(self.dbconn, 'mls3d',
'MultiLineStringZ ((0 0 0, 1 1 1),(2 2 2, 3 3 3))')
test_table(self.dbconn, 'pt4d', 'PointZM (1 2 3 4)')
def testMetadata(self):
""" Test that metadata is correctly acquired from provider """
metadata = self.vl.metadata()
self.assertEqual(
metadata.crs(), QgsCoordinateReferenceSystem.fromEpsgId(4326))
self.assertEqual(metadata.type(), 'dataset')
self.assertEqual(metadata.abstract(), 'QGIS Test Table')
def testGetFeaturesUniqueId(self):
"""
Test tables with inheritance for unique ids
"""
def test_unique(features, num_features):
featureids = []
for f in features:
self.assertFalse(f.id() in featureids)
featureids.append(f.id())
self.assertEqual(len(features), num_features)
vl = QgsVectorLayer('%s srid=4326 table="qgis_test".%s (geom) sql=' % (self.dbconn, 'someData'), "testgeom",
"postgres")
self.assertTrue(vl.isValid())
# Test someData
test_unique([f for f in vl.getFeatures()], 5)
# Test base_table_bad: layer is invalid
vl = QgsVectorLayer('%s srid=4326 table="qgis_test".%s (geom) sql=' % (self.dbconn, 'base_table_bad'),
"testgeom", "postgres")
self.assertFalse(vl.isValid())
# Test base_table_bad with use estimated metadata: layer is valid because the unique test is skipped
vl = QgsVectorLayer(
'%s srid=4326 estimatedmetadata="true" table="qgis_test".%s (geom) sql=' % (
self.dbconn, 'base_table_bad'),
"testgeom", "postgres")
self.assertTrue(vl.isValid())
# Test base_table_good: layer is valid
vl = QgsVectorLayer('%s srid=4326 table="qgis_test".%s (geom) sql=' % (self.dbconn, 'base_table_good'),
"testgeom", "postgres")
self.assertTrue(vl.isValid())
test_unique([f for f in vl.getFeatures()], 4)
# Test base_table_good with use estimated metadata: layer is valid
vl = QgsVectorLayer(
'%s srid=4326 estimatedmetadata="true" table="qgis_test".%s (geom) sql=' % (
self.dbconn, 'base_table_good'),
"testgeom", "postgres")
self.assertTrue(vl.isValid())
test_unique([f for f in vl.getFeatures()], 4)
# See https://github.com/qgis/QGIS/issues/22258
# TODO: accept multi-featured layers, and an array of values/fids
def testSignedIdentifiers(self):
def test_layer(ql, att, val, fidval):
self.assertTrue(ql.isValid())
features = ql.getFeatures()
att_idx = ql.fields().lookupField(att)
count = 0
for f in features:
count += 1
self.assertEqual(f.attributes()[att_idx], val)
self.assertEqual(f.id(), fidval)
self.assertEqual(count, 1)
def test(dbconn, query, att, val, fidval):
table = query.replace('"', '\\"')
uri = '%s table="%s" (g) key=\'%s\'' % (dbconn, table, att)
ql = QgsVectorLayer(uri, "t", "postgres")
test_layer(ql, att, val, fidval)
# now with estimated metadata
uri += ' estimatedmetadata="true"'
test_layer(ql, att, val, fidval)
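# Expected fids below: 16/32-bit keys are reinterpreted as unsigned
# 32-bit values (two's complement), e.g. -1 -> 2^32 - 1 = 4294967295,
# while 64-bit keys do not fit and get newly assigned fids instead.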
# --- INT16 ----
# zero
test(self.dbconn, '(SELECT 0::int2 i, NULL::geometry(Point) g)', 'i', 0, 0)
# low positive
test(self.dbconn, '(SELECT 1::int2 i, NULL::geometry(Point) g)', 'i', 1, 1)
# low negative
test(self.dbconn, '(SELECT -1::int2 i, NULL::geometry(Point) g)',
'i', -1, 4294967295)
# max positive signed 16bit integer
test(self.dbconn, '(SELECT 32767::int2 i, NULL::geometry(Point) g)',
'i', 32767, 32767)
# max negative signed 16bit integer
test(self.dbconn, '(SELECT (-32768)::int2 i, NULL::geometry(Point) g)',
'i', -32768, 4294934528)
# --- INT32 ----
# zero
test(self.dbconn, '(SELECT 0::int4 i, NULL::geometry(Point) g)', 'i', 0, 0)
# low positive
test(self.dbconn, '(SELECT 2::int4 i, NULL::geometry(Point) g)', 'i', 2, 2)
# low negative
test(self.dbconn, '(SELECT -2::int4 i, NULL::geometry(Point) g)',
'i', -2, 4294967294)
# max positive signed 32bit integer
test(self.dbconn, '(SELECT 2147483647::int4 i, NULL::geometry(Point) g)',
'i', 2147483647, 2147483647)
# max negative signed 32bit integer
test(self.dbconn, '(SELECT (-2147483648)::int4 i, NULL::geometry(Point) g)',
'i', -2147483648, 2147483648)
# --- INT64 (FIDs are always 1 because assigned ex-novo) ----
# zero
test(self.dbconn, '(SELECT 0::int8 i, NULL::geometry(Point) g)', 'i', 0, 1)
# low positive
test(self.dbconn, '(SELECT 3::int8 i, NULL::geometry(Point) g)', 'i', 3, 1)
# low negative
test(self.dbconn, '(SELECT -3::int8 i, NULL::geometry(Point) g)', 'i', -3, 1)
# max positive signed 64bit integer
test(self.dbconn, '(SELECT 9223372036854775807::int8 i, NULL::geometry(Point) g)',
'i', 9223372036854775807, 1)
# max negative signed 64bit integer
test(self.dbconn, '(SELECT (-9223372036854775808)::int8 i, NULL::geometry(Point) g)', 'i', -9223372036854775808,
1)
def testPktIntInsert(self):
vl = QgsVectorLayer('{} table="qgis_test"."{}" key="pk" sql='.format(self.dbconn, 'bikes_view'), "bikes_view",
"postgres")
self.assertTrue(vl.isValid())
f = QgsFeature(vl.fields())
f['pk'] = NULL
f['name'] = 'Cilo'
r, f = vl.dataProvider().addFeatures([f])
self.assertTrue(r)
self.assertNotEqual(f[0]['pk'], NULL, f[0].attributes())
vl.deleteFeatures([f[0].id()])
def testGeneratedFields(self):
"""Test if GENERATED geometry/geography columns are correctly handled by the provider."""
cur = self.con.cursor()
cur.execute("SHOW server_version_num")
pgversion = int(cur.fetchone()[0])
# GENERATED columns are unsupported by PostgreSQL versions earlier than 12.
if pgversion < 120000:
return
# Geometry columns
vl = QgsVectorLayer('{} table="qgis_test"."{}" (geom) srid=4326 type=POLYGON key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
self.assertTrue(vl.isValid())
# writing geometry...
f = QgsFeature(vl.fields())
ix_name = f.fieldNameIndex('name')
f.setGeometry(QgsGeometry.fromWkt('Polygon ((-67 -2, -67 0, -68 0, -70 -1, -67 -2))'))
f.setAttribute(ix_name, 'QGIS-3')
self.assertTrue(vl.startEditing())
self.assertTrue(vl.addFeatures([f]))
self.assertTrue(vl.commitChanges())
# reading back to see if we saved the centroid correctly.
vl2 = QgsVectorLayer('{} table="qgis_test"."{}" (cent) srid=4326 type=POINT key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
f2 = next(vl2.getFeatures(QgsFeatureRequest()))
generated_geometry = f2.geometry().asWkt()
expected_geometry = 'Point (-68.047619047619051 -0.90476190476190477)'
expected_area = 43069568296.34387
assert compareWkt(generated_geometry, expected_geometry), "Geometry mismatch! Expected:\n{}\nGot:\n{}\n".format(expected_geometry, generated_geometry)
self.assertAlmostEqual(f2['poly_area'], expected_area, places=4)
self.assertEqual(f2['name'], 'QGIS-3')
# Checking if we can correctly change values of an existing feature.
self.assertTrue(vl2.startEditing())
ix2_name = f2.fieldNameIndex('name')
fid2 = f2.id()
vl2.changeAttributeValue(fid2, ix2_name, 'New')
self.assertTrue(vl2.commitChanges())
# getting a brand new QgsVectorLayer
vl = QgsVectorLayer('{} table="qgis_test"."{}" (geom) srid=4326 type=POLYGON key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
self.assertTrue(vl.isValid())
# checking if the name field was correctly updated
f = next(vl.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['name'], 'New')
# Now, check if we can change the value of a GENERATED field (we shouldn't)
self.assertTrue(vl.startEditing())
ix_area = f.fieldNameIndex('poly_area')
fid = f.id()
vl.changeAttributeValue(fid, ix_area, 42)
self.assertTrue(vl.commitChanges())
# reading back
vl2 = QgsVectorLayer('{} table="qgis_test"."{}" (geom) srid=4326 type=POLYGON key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
f2 = next(vl2.getFeatures(QgsFeatureRequest()))
self.assertAlmostEqual(f2['poly_area'], expected_area, places=4)
# now, getting a brand new QgsVectorLayer to check if changes (UPDATE) in the geometry are reflected in the generated fields
vl = QgsVectorLayer('{} table="qgis_test"."{}" (geom) srid=4326 type=POLYGON key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
self.assertTrue(vl.isValid())
f = next(vl.getFeatures(QgsFeatureRequest()))
vl.startEditing()
fid = f.id()
vl.changeGeometry(fid, QgsGeometry.fromWkt('Polygon ((-67 -2, -65 0, -68 0, -70 -1, -67 -2))'))
vl.commitChanges()
# reading back...
vl2 = QgsVectorLayer('{} table="qgis_test"."{}" (cent) srid=4326 type=POINT key="id" sql='.format(self.dbconn, "test_gen_col"), "test_gen_col", "postgres")
f2 = next(vl2.getFeatures(QgsFeatureRequest()))
generated_geometry = f2.geometry().asWkt()
expected_geometry = 'Point (-67.42424242424242209 -0.81818181818181823)'
expected_area = 67718478405.28429
assert compareWkt(generated_geometry, expected_geometry), "Geometry mismatch! Expected:\n{}\nGot:\n{}\n".format(expected_geometry, generated_geometry)
self.assertAlmostEqual(f2['poly_area'], expected_area, places=4)
self.assertEqual(f2['name'], 'New')
# Geography columns
vl3 = QgsVectorLayer('{} table="qgis_test"."{}" (geog) srid=4326 type=POLYGON key="id" sql='.format(self.dbconn, "test_gen_geog_col"), "test_gen_geog_col", "postgres")
self.assertTrue(vl3.isValid())
# writing geography...
f3 = QgsFeature(vl3.fields())
f3.setGeometry(QgsGeometry.fromWkt('Polygon ((-67 -2, -67 0, -68 0, -70 -1, -67 -2))'))
self.assertTrue(vl3.startEditing())
self.assertTrue(vl3.addFeatures([f3]))
self.assertTrue(vl3.commitChanges())
# reading back geography and checking values
vl4 = QgsVectorLayer('{} table="qgis_test"."{}" (cent) srid=4326 type=POINT key="id" sql='.format(self.dbconn, "test_gen_geog_col"), "test_gen_geog_col", "postgres")
f4 = next(vl4.getFeatures(QgsFeatureRequest()))
generated_geometry = f4.geometry().asWkt()
expected_geometry = 'Point (-68.0477406158202 -0.904960604589168)'
expected_area = 43088884296.69713
assert compareWkt(generated_geometry, expected_geometry), "Geometry mismatch! Expected:\n{}\nGot:\n{}\n".format(expected_geometry, generated_geometry)
self.assertEqual(f4['poly_area'], expected_area)
def testNonPkBigintField(self):
"""Test if we can correctly insert, read and change attributes(fields) of type bigint and which are not PKs."""
vl = QgsVectorLayer(
'{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(
self.dbconn, 'bigint_pk'),
"bigint_pk", "postgres")
self.assertTrue(vl.isValid())
flds = vl.fields()
# check if default values are correctly read back
f = next(vl.getFeatures(QgsFeatureRequest()))
bigint_with_default_idx = vl.fields().lookupField('bigint_attribute_def')
self.assertEqual(f.attributes()[bigint_with_default_idx], 42)
# check if NULL values are correctly read
bigint_def_null_idx = vl.fields().lookupField('bigint_attribute')
self.assertEqual(f.attributes()[bigint_def_null_idx], NULL)
# check if we can overwrite a default value
vl.startEditing()
vl.changeAttributeValue(f.id(), bigint_with_default_idx, 43)
pkidx = vl.fields().lookupField('pk')
editedid = f.attributes()[pkidx]
self.assertTrue(vl.commitChanges())
vl2 = QgsVectorLayer(
'{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(
self.dbconn, 'bigint_pk'),
"bigint_pk", "postgres")
flds = vl2.fields()
self.assertTrue(vl2.isValid())
f = next(vl2.getFeatures(
QgsFeatureRequest().setFilterExpression('pk = ' + str(editedid))))
bigint_with_default_idx = vl2.fields().lookupField('bigint_attribute_def')
self.assertEqual(f.attributes()[bigint_with_default_idx], 43)
# check if we can insert a new value
dp = vl2.dataProvider()
dp.setProviderProperty(QgsDataProvider.EvaluateDefaultValues, 1)
pkidx = vl2.fields().lookupField('pk')
vl2.startEditing()
f = QgsFeature(vl2.fields())
f['pk'] = NULL
f['value'] = 'The answer.'
f['bigint_attribute'] = 84
f.setAttribute(pkidx, vl2.dataProvider().defaultValue(pkidx))
f.setAttribute(bigint_with_default_idx,
vl2.dataProvider().defaultValue(bigint_with_default_idx))
r, f = vl2.dataProvider().addFeatures([f])
self.assertTrue(r)
vl2.commitChanges()
inserted_id = f[0]['pk']
f = next(vl2.getFeatures(
QgsFeatureRequest().setFilterExpression('pk = ' + str(inserted_id))))
self.assertEqual(f['bigint_attribute'], 84)
self.assertEqual(f['bigint_attribute_def'], 42)
def testPktUpdateBigintPk(self):
"""Test if we can update objects with positive, zero and negative bigint PKs."""
vl = QgsVectorLayer(
'{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(
self.dbconn, 'bigint_pk'),
"bigint_pk", "postgres")
flds = vl.fields()
self.assertTrue(vl.isValid())
vl.startEditing()
statuses = [-1, -1, -1, -1]
# changing values...
for ft in vl.getFeatures():
if ft['value'] == 'first value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '1st value')
statuses[0] = 0
elif ft['value'] == 'second value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '2nd value')
statuses[1] = 0
elif ft['value'] == 'zero value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '0th value')
statuses[2] = 0
elif ft['value'] == 'negative value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '-1th value')
statuses[3] = 0
self.assertTrue(vl.commitChanges())
self.assertTrue(all(x == 0 for x in statuses))
# now, let's see if the values were changed
vl2 = QgsVectorLayer(
'{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(
self.dbconn, 'bigint_pk'),
"bigint_pk", "postgres")
self.assertTrue(vl2.isValid())
for ft in vl2.getFeatures():
if ft['value'] == '1st value':
statuses[0] = 1
elif ft['value'] == '2nd value':
statuses[1] = 1
elif ft['value'] == '0th value':
statuses[2] = 1
elif ft['value'] == '-1th value':
statuses[3] = 1
self.assertTrue(all(x == 1 for x in statuses))
def testPktUpdateBigintPkNonFirst(self):
"""Test if we can update objects with positive, zero and negative bigint PKs in tables whose PK is not the first field"""
vl = QgsVectorLayer('{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(self.dbconn,
'bigint_non_first_pk'),
"bigint_non_first_pk", "postgres")
flds = vl.fields()
self.assertTrue(vl.isValid())
vl.startEditing()
statuses = [-1, -1, -1, -1]
# changing values...
for ft in vl.getFeatures():
if ft['value'] == 'first value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '1st value')
statuses[0] = 0
elif ft['value'] == 'second value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '2nd value')
statuses[1] = 0
elif ft['value'] == 'zero value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '0th value')
statuses[2] = 0
elif ft['value'] == 'negative value':
vl.changeAttributeValue(
ft.id(), flds.indexOf('value'), '-1th value')
statuses[3] = 0
self.assertTrue(vl.commitChanges())
self.assertTrue(all(x == 0 for x in statuses))
# now, let's see if the values were changed
vl2 = QgsVectorLayer(
'{} sslmode=disable srid=4326 key="pk" table="qgis_test".{} (geom)'.format(
self.dbconn, 'bigint_non_first_pk'),
"bigint_pk_nonfirst", "postgres")
self.assertTrue(vl2.isValid())
for ft in vl2.getFeatures():
if ft['value'] == '1st value':
statuses[0] = 1
elif ft['value'] == '2nd value':
statuses[1] = 1
elif ft['value'] == '0th value':
statuses[2] = 1
elif ft['value'] == '-1th value':
statuses[3] = 1
self.assertTrue(all(x == 1 for x in statuses))
def testPktComposite(self):
"""
Check that tables with PKs composed of many fields of different types are correctly read and written to
"""
vl = QgsVectorLayer('{} sslmode=disable srid=4326 key=\'"pk1","pk2"\' table="qgis_test"."tb_test_compound_pk" (geom)'.format(self.dbconn), "test_compound", "postgres")
self.assertTrue(vl.isValid())
fields = vl.fields()
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression('pk1 = 1 AND pk2 = 2')))
# first of all: we must be able to fetch a valid feature
self.assertTrue(f.isValid())
self.assertEqual(f['pk1'], 1)
self.assertEqual(f['pk2'], 2)
self.assertEqual(f['value'], 'test 2')
# can we edit a field?
vl.startEditing()
vl.changeAttributeValue(f.id(), fields.indexOf('value'), 'Edited Test 2')
self.assertTrue(vl.commitChanges())
# Did we get it right? Let's create a new QgsVectorLayer and try to read back our changes:
vl2 = QgsVectorLayer('{} sslmode=disable srid=4326 table="qgis_test"."tb_test_compound_pk" (geom) key=\'"pk1","pk2"\' '.format(self.dbconn), "test_compound2", "postgres")
self.assertTrue(vl2.isValid())
f2 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression('pk1 = 1 AND pk2 = 2')))
self.assertTrue(f2.isValid())
# Then, making sure we really did change our value.
self.assertEqual(f2['value'], 'Edited Test 2')
# How about inserting a new feature?
f3 = QgsFeature(vl2.fields())
f3['pk1'] = 4
f3['pk2'] = -9223372036854775800
f3['value'] = 'other test'
vl.startEditing()
res, f3 = vl.dataProvider().addFeatures([f3])
self.assertTrue(res)
self.assertTrue(vl.commitChanges())
# can we catch it on another layer?
f4 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression('pk2 = -9223372036854775800')))
self.assertTrue(f4.isValid())
expected_attrs = [4, -9223372036854775800, 'other test']
self.assertEqual(f4.attributes(), expected_attrs)
# Finally, let's delete one of the features.
f5 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression('pk1 = 2 AND pk2 = 1')))
vl2.startEditing()
vl2.deleteFeatures([f5.id()])
self.assertTrue(vl2.commitChanges())
# did we really delete? Let's try to get the deleted feature from the first layer.
f_iterator = vl.getFeatures(QgsFeatureRequest().setFilterExpression('pk1 = 2 AND pk2 = 1'))
got_feature = True
try:
f6 = next(f_iterator)
got_feature = f6.isValid()
except StopIteration:
got_feature = False
self.assertFalse(got_feature)
def testPktCompositeFloat(self):
"""
Check that tables with PKs composed of many fields of different types, including a floating point one, are correctly read and written to
"""
vl = QgsVectorLayer('{} sslmode=disable srid=4326 key=\'"pk1","pk2","pk3"\' table="qgis_test"."tb_test_composite_float_pk" (geom)'.format(self.dbconn), "test_composite_float", "postgres")
self.assertTrue(vl.isValid())
fields = vl.fields()
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk3 = '3.14159274'")))
# first of all: we must be able to fetch a valid feature
self.assertTrue(f.isValid())
self.assertEqual(f['pk1'], 1)
self.assertEqual(f['pk2'], 2)
self.assertAlmostEqual(f['pk3'], 3.14159274)
self.assertEqual(f['value'], 'test 2')
# can we edit a field?
vl.startEditing()
vl.changeAttributeValue(f.id(), fields.indexOf('value'), 'Edited Test 2')
self.assertTrue(vl.commitChanges())
# Did we get it right? Let's create a new QgsVectorLayer and try to read back our changes:
vl2 = QgsVectorLayer('{} sslmode=disable srid=4326 key=\'"pk1","pk2","pk3"\' table="qgis_test"."tb_test_composite_float_pk" (geom)'.format(self.dbconn), "test_composite_float2", "postgres")
self.assertTrue(vl2.isValid())
f2 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk3 = '3.14159274'")))
self.assertTrue(f2.isValid())
# just making sure we have the correct feature
self.assertAlmostEqual(f2['pk3'], 3.14159274)
# Then, making sure we really did change our value.
self.assertEqual(f2['value'], 'Edited Test 2')
# How about inserting a new feature?
f3 = QgsFeature(vl2.fields())
f3['pk1'] = 4
f3['pk2'] = -9223372036854775800
f3['pk3'] = 7.29154
f3['value'] = 'other test'
vl.startEditing()
res, f3 = vl.dataProvider().addFeatures([f3])
self.assertTrue(res)
self.assertTrue(vl.commitChanges())
# can we catch it on another layer?
f4 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk2 = '-9223372036854775800'")))
self.assertTrue(f4.isValid())
expected_attrs = [4, -9223372036854775800, 7.29154, 'other test']
gotten_attrs = [f4['pk1'], f4['pk2'], f4['pk3'], f4['value']]
self.assertEqual(gotten_attrs[0], expected_attrs[0])
self.assertEqual(gotten_attrs[1], expected_attrs[1])
self.assertAlmostEqual(gotten_attrs[2], expected_attrs[2], places=4)
self.assertEqual(gotten_attrs[3], expected_attrs[3])
# Finally, let's delete one of the features.
f5 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk3 = '7.29154'")))
vl2.startEditing()
vl2.deleteFeatures([f5.id()])
self.assertTrue(vl2.commitChanges())
# did we really delete?
f_iterator = vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk3 = '7.29154'"))
got_feature = True
try:
f6 = next(f_iterator)
got_feature = f6.isValid()
except StopIteration:
got_feature = False
self.assertFalse(got_feature)
def testPktFloatingPoint(self):
"""
Check if we can handle floating point/numeric primary keys correctly
"""
# 1. 32 bit float (PostgreSQL "REAL" type)
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable srid=4326 key="pk" table="qgis_test"."tb_test_float_pk" (geom)', "test_float_pk", "postgres")
self.assertTrue(vl.isValid())
# 1.1. Retrieving
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '3.141592653589793238462643383279502884197169'")))
self.assertTrue(f.isValid())
self.assertEqual(f['value'], 'first teste')
# 1.2. Editing
self.assertTrue(vl.startEditing())
vl.changeAttributeValue(f.id(), vl.fields().indexOf('value'), 'Changed first')
self.assertTrue(vl.commitChanges())
# 1.2.1. Checking edit from another vector layer
vl2 = QgsVectorLayer(self.dbconn + ' sslmode=disable srid=4326 key="pk" table="qgis_test"."tb_test_float_pk" (geom)', "test_float_pk2", "postgres")
self.assertTrue(vl2.isValid())
f2 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '3.141592653589793238462643383279502884197169'")))
self.assertTrue(f2.isValid())
self.assertEqual(f2['value'], 'Changed first')
# 1.3. Deleting
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '2.718281828459045235360287471352662497757247'")))
vl.startEditing()
vl.deleteFeatures([f.id()])
self.assertTrue(vl.commitChanges())
# 1.3.1. Checking deletion
f_iterator = vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '2.718281828459045235360287471352662497757247'"))
got_feature = True
try:
f2 = next(f_iterator)
got_feature = f2.isValid()
except StopIteration:
got_feature = False
self.assertFalse(got_feature)
# 1.4. Inserting new feature
newpointwkt = 'Point(-47.751 -15.644)'
f = QgsFeature(vl.fields())
f['pk'] = 0.22222222222222222222222
f['value'] = 'newly inserted'
f.setGeometry(QgsGeometry.fromWkt(newpointwkt))
vl.startEditing()
res, f = vl.dataProvider().addFeatures([f])
self.assertTrue(res)
self.assertTrue(vl.commitChanges())
# 1.4.1. Checking insertion
f2 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '0.22222222222222222222222'")))
self.assertTrue(f2.isValid())
self.assertAlmostEqual(f2['pk'], 0.2222222222222222)
self.assertEqual(f2['value'], 'newly inserted')
assert compareWkt(f2.geometry().asWkt(), newpointwkt), "Geometry mismatch. Expected: {} Got: {} \n".format(f2.geometry().asWkt(), newpointwkt)
# One more check: can we retrieve the same row with the value that we got from this layer?
floatpk = f2['pk']
f3 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '{}'".format(floatpk))))
self.assertTrue(f3.isValid())
self.assertEqual(f3['value'], 'newly inserted')
self.assertEqual(f3['pk'], floatpk)
# 2. 64 bit float (PostgreSQL "DOUBLE PRECISION" type)
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable srid=4326 key="pk" table="qgis_test"."tb_test_double_pk" (geom)', "test_double_pk", "postgres")
self.assertTrue(vl.isValid())
# 2.1. Retrieving
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '3.141592653589793238462643383279502884197169'")))
self.assertTrue(f.isValid())
self.assertEqual(f['value'], 'first teste')
# 2.2. Editing
self.assertTrue(vl.startEditing())
vl.changeAttributeValue(f.id(), vl.fields().indexOf('value'), 'Changed first')
self.assertTrue(vl.commitChanges())
# 2.2.1. Checking edit from another vector layer
vl2 = QgsVectorLayer(self.dbconn + ' sslmode=disable srid=4326 key="pk" table="qgis_test"."tb_test_double_pk" (geom)', "test_double_pk2", "postgres")
self.assertTrue(vl2.isValid())
f2 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '3.141592653589793238462643383279502884197169'")))
self.assertTrue(f2.isValid())
self.assertEqual(f2['value'], 'Changed first')
# 2.3. Deleting
f = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '2.718281828459045235360287471352662497757247'")))
vl.startEditing()
vl.deleteFeatures([f.id()])
self.assertTrue(vl.commitChanges())
# 2.3.1. Checking deletion
f_iterator = vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '2.718281828459045235360287471352662497757247'"))
got_feature = True
try:
f2 = next(f_iterator)
got_feature = f2.isValid()
except StopIteration:
got_feature = False
self.assertFalse(got_feature)
# 2.4. Inserting new feature
newpointwkt = 'Point(-47.751 -15.644)'
f = QgsFeature(vl.fields())
f['pk'] = 0.22222222222222222222222
f['value'] = 'newly inserted'
f.setGeometry(QgsGeometry.fromWkt(newpointwkt))
vl.startEditing()
res, f = vl.dataProvider().addFeatures([f])
self.assertTrue(res)
self.assertTrue(vl.commitChanges())
# 2.4.1. Checking insertion
f2 = next(vl2.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '0.22222222222222222222222'")))
self.assertTrue(f2.isValid())
self.assertAlmostEqual(f2['pk'], 0.2222222222222222, places=15)
self.assertEqual(f2['value'], 'newly inserted')
assert compareWkt(f2.geometry().asWkt(), newpointwkt), "Geometry mismatch. Expected: {} Got: {} \n".format(f2.geometry().asWkt(), newpointwkt)
# One more check: can we retrieve the same row with the value that we got from this layer?
doublepk = f2['pk']
f3 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("pk = '{}'".format(doublepk))))
self.assertTrue(f3.isValid())
self.assertEqual(f3['value'], 'newly inserted')
self.assertEqual(f3['pk'], doublepk)
# no NUMERIC/DECIMAL checks here. NUMERIC primary keys are unsupported.
# TODO: implement NUMERIC primary keys/arbitrary-precision arithmetic/fixed-point math in QGIS.
def testPktMapInsert(self):
vl = QgsVectorLayer('{} table="qgis_test"."{}" key="obj_id" sql='.format(self.dbconn, 'oid_serial_table'),
"oid_serial", "postgres")
self.assertTrue(vl.isValid())
f = QgsFeature(vl.fields())
f['obj_id'] = vl.dataProvider().defaultValueClause(0)
f['name'] = 'Test'
r, f = vl.dataProvider().addFeatures([f])
self.assertTrue(r)
self.assertNotEqual(f[0]['obj_id'], NULL, f[0].attributes())
vl.deleteFeatures([f[0].id()])
def testNull(self):
"""
Asserts that 0, '' and NULL are treated as different values on insert
"""
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' table="qgis_test"."constraints" sql=', 'test1',
'postgres')
self.assertTrue(vl.isValid())
QgsProject.instance().addMapLayer(vl)
tg = QgsTransactionGroup()
tg.addLayer(vl)
vl.startEditing()
def onError(message):
"""We should not get here. If we do, fail and say why"""
self.assertFalse(True, message)
vl.raiseError.connect(onError)
f = QgsFeature(vl.fields())
f['gid'] = 100
f['val'] = 0
f['name'] = ''
self.assertTrue(vl.addFeature(f))
feature = next(vl.getFeatures('"gid" = 100'))
self.assertEqual(f['val'], feature['val'])
self.assertEqual(f['name'], feature['name'])
def testNestedInsert(self):
tg = QgsTransactionGroup()
tg.addLayer(self.vl)
self.vl.startEditing()
it = self.vl.getFeatures()
f = next(it)
f['pk'] = NULL
self.vl.addFeature(f) # Should not deadlock during an active iteration
f = next(it)
def testTimeout(self):
"""
Asserts that we will not deadlock if more iterators are opened in parallel
than there are connections available in the connection pool
"""
request = QgsFeatureRequest()
request.setTimeout(1)
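# setTimeout() is in milliseconds; a 1 ms timeout makes requests that
# cannot grab a pooled connection give up instead of blocking forever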
iterators = list()
for i in range(100):
iterators.append(self.vl.getFeatures(request))
def testTransactionDirtyName(self):
# create a vector layer based on postgres
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
# prepare a project with transactions enabled
p = QgsProject()
p.setAutoTransaction(True)
p.addMapLayers([vl])
vl.startEditing()
# update the data within the transaction
tr = vl.dataProvider().transaction()
sql = "update qgis_test.some_poly_data set pk=1 where pk=1"
name = "My Awesome Transaction!"
self.assertTrue(tr.executeSql(sql, True, name)[0])
# test name
self.assertEqual(vl.undoStack().command(0).text(), name)
# rollback
vl.rollBack()
def testTransactionDirty(self):
# create a vector layer based on postgres
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
# prepare a project with transactions enabled
p = QgsProject()
p.setAutoTransaction(True)
p.addMapLayers([vl])
vl.startEditing()
# check that the feature used for testing is ok
ft0 = vl.getFeatures('pk=1')
f = QgsFeature()
self.assertTrue(ft0.nextFeature(f))
# update the data within the transaction
tr = vl.dataProvider().transaction()
sql = "update qgis_test.some_poly_data set pk=33 where pk=1"
self.assertTrue(tr.executeSql(sql, True)[0])
# check that the pk of the feature has been changed
ft = vl.getFeatures('pk=1')
self.assertFalse(ft.nextFeature(f))
ft = vl.getFeatures('pk=33')
self.assertTrue(ft.nextFeature(f))
# executeSql was called with dirty=True, so the underlying data has
# been modified and the layer is tagged as modified
self.assertTrue(vl.isModified())
# undo sql query
vl.undoStack().undo()
# check that the original feature with pk=1 is back
ft0 = vl.getFeatures('pk=1')
self.assertTrue(ft0.nextFeature(f))
# redo
vl.undoStack().redo()
# check that the pk of the feature has been changed
ft1 = vl.getFeatures('pk=1')
self.assertFalse(ft1.nextFeature(f))
def testTransactionConstraints(self):
# create a vector layer based on postgres
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'id\' table="qgis_test"."check_constraints" sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
# prepare a project with transactions enabled
p = QgsProject()
p.setAutoTransaction(True)
p.addMapLayers([vl])
# get feature
f = QgsFeature()
self.assertTrue(vl.getFeatures('id=1').nextFeature(f))
self.assertEqual(f.attributes(), [1, 4, 3])
# start edition
vl.startEditing()
# update attribute form with a failing constraints
# coming from the database if attributes are updated
# one at a time.
# Current feature: a = 4 / b = 3
# Update feature: a = 1 / b = 0
# If updated one at a time, '(a = 1) < (b = 3)' => FAIL!
form = QgsAttributeForm(vl, f)
for w in form.findChildren(QLabel):
if w.buddy():
spinBox = w.buddy()
if w.text() == 'a':
spinBox.setValue(1)
elif w.text() == 'b':
spinBox.setValue(0)
# save
form.save()
# check new values
self.assertTrue(vl.getFeatures('id=1').nextFeature(f))
self.assertEqual(f.attributes(), [1, 1, 0])
def testTransactionTuple(self):
# create a vector layer based on postgres
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
# prepare a project with transactions enabled
p = QgsProject()
p.setAutoTransaction(True)
p.addMapLayers([vl])
vl.startEditing()
# execute a query which returns a tuple
tr = vl.dataProvider().transaction()
sql = "select * from qgis_test.some_poly_data"
self.assertTrue(tr.executeSql(sql, False)[0])
# underlying data has not been modified
self.assertFalse(vl.isModified())
def testDomainTypes(self):
"""Test that domain types are correctly mapped"""
vl = QgsVectorLayer('%s table="qgis_test"."domains" sql=' %
(self.dbconn), "domains", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
expected = {}
expected['fld_var_char_domain'] = {'type': QVariant.String, 'typeName': 'qgis_test.var_char_domain',
'length': -1}
expected['fld_var_char_domain_6'] = {'type': QVariant.String, 'typeName': 'qgis_test.var_char_domain_6',
'length': 6}
expected['fld_character_domain'] = {'type': QVariant.String, 'typeName': 'qgis_test.character_domain',
'length': 1}
expected['fld_character_domain_6'] = {'type': QVariant.String, 'typeName': 'qgis_test.character_domain_6',
'length': 6}
expected['fld_char_domain'] = {
'type': QVariant.String, 'typeName': 'qgis_test.char_domain', 'length': 1}
expected['fld_char_domain_6'] = {
'type': QVariant.String, 'typeName': 'qgis_test.char_domain_6', 'length': 6}
expected['fld_text_domain'] = {
'type': QVariant.String, 'typeName': 'qgis_test.text_domain', 'length': -1}
expected['fld_numeric_domain'] = {'type': QVariant.Double, 'typeName': 'qgis_test.numeric_domain', 'length': 10,
'precision': 4}
for f, e in list(expected.items()):
self.assertEqual(
fields.at(fields.indexFromName(f)).type(), e['type'])
self.assertEqual(fields.at(fields.indexFromName(f)
).typeName(), e['typeName'])
self.assertEqual(
fields.at(fields.indexFromName(f)).length(), e['length'])
if 'precision' in e:
self.assertEqual(
fields.at(fields.indexFromName(f)).precision(), e['precision'])
def testRenameAttributes(self):
''' Test renameAttributes() '''
vl = QgsVectorLayer('%s table="qgis_test"."rename_table" sql=' % (
self.dbconn), "renames", "postgres")
provider = vl.dataProvider()
provider.renameAttributes({1: 'field1', 2: 'field2'})
# bad rename
self.assertFalse(provider.renameAttributes({-1: 'not_a_field'}))
self.assertFalse(provider.renameAttributes({100: 'not_a_field'}))
# already exists
self.assertFalse(provider.renameAttributes({1: 'field2'}))
# rename one field
self.assertTrue(provider.renameAttributes({1: 'newname'}))
self.assertEqual(provider.fields().at(1).name(), 'newname')
vl.updateFields()
fet = next(vl.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'newname')
# rename two fields
self.assertTrue(provider.renameAttributes(
{1: 'newname2', 2: 'another'}))
self.assertEqual(provider.fields().at(1).name(), 'newname2')
self.assertEqual(provider.fields().at(2).name(), 'another')
vl.updateFields()
fet = next(vl.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'newname2')
self.assertEqual(fet.fields()[2].name(), 'another')
# close layer and reopen, then recheck to confirm that changes were saved to db
del vl
vl = QgsVectorLayer('%s table="qgis_test"."rename_table" sql=' % (
self.dbconn), "renames", "postgres")
provider = vl.dataProvider()
self.assertEqual(provider.fields().at(1).name(), 'newname2')
self.assertEqual(provider.fields().at(2).name(), 'another')
fet = next(vl.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'newname2')
self.assertEqual(fet.fields()[2].name(), 'another')
def testEditorWidgetTypes(self):
"""Test that editor widget types can be fetched from the qgis_editor_widget_styles table"""
vl = QgsVectorLayer('%s table="qgis_test"."widget_styles" sql=' % (
self.dbconn), "widget_styles", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
setup1 = fields.field("fld1").editorWidgetSetup()
self.assertFalse(setup1.isNull())
self.assertEqual(setup1.type(), "FooEdit")
self.assertEqual(setup1.config(), {"param1": "value1", "param2": "2"})
best1 = QgsGui.editorWidgetRegistry().findBest(vl, "fld1")
self.assertEqual(best1.type(), "FooEdit")
self.assertEqual(best1.config(), setup1.config())
self.assertTrue(fields.field("fld2").editorWidgetSetup().isNull())
best2 = QgsGui.editorWidgetRegistry().findBest(vl, "fld2")
self.assertEqual(best2.type(), "TextEdit")
def testHstore(self):
vl = QgsVectorLayer('%s table="qgis_test"."dict" sql=' %
(self.dbconn), "testhstore", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(
fields.at(fields.indexFromName('value')).type(), QVariant.Map)
f = next(vl.getFeatures(QgsFeatureRequest()))
value_idx = vl.fields().lookupField('value')
self.assertIsInstance(f.attributes()[value_idx], dict)
self.assertEqual(f.attributes()[value_idx], {'a': 'b', '1': '2'})
new_f = QgsFeature(vl.fields())
new_f['pk'] = NULL
new_f['value'] = {'simple': '1', 'doubleQuote': '"y"',
'quote': "'q'", 'backslash': '\\'}
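# these values exercise hstore quoting: hstore literals double-quote keys
# and values and backslash-escape embedded quotes and backslashes, so the
# read-back below verifies the escaping is applied and undone correctly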
r, fs = vl.dataProvider().addFeatures([new_f])
self.assertTrue(r)
new_pk = fs[0]['pk']
self.assertNotEqual(new_pk, NULL, fs[0].attributes())
try:
read_back = vl.getFeature(new_pk)
self.assertEqual(read_back['pk'], new_pk)
self.assertEqual(read_back['value'], new_f['value'])
finally:
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeatures([new_pk]))
self.assertTrue(vl.commitChanges())
def testJson(self):
vl = QgsVectorLayer('%s table="qgis_test"."json" sql=' %
(self.dbconn), "testjson", "postgres")
self.assertTrue(vl.isValid())
attrs = (
123,
1233.45,
None,
True,
False,
r"String literal with \"quotes\" 'and' other funny chars []{};#/èé*",
[1, 2, 3.4, None],
[True, False],
{'a': 123, 'b': 123.34, 'c': 'a string', 'd': [
1, 2, 3], 'e': {'a': 123, 'b': 123.45}}
)
attrs2 = (
246,
2466.91,
None,
True,
False,
r"Yet another string literal with \"quotes\" 'and' other funny chars: π []{};#/èé*",
[2, 4, 3.14159, None],
[True, False],
{'a': 246, 'b': 246.68, 'c': 'a rounded area: π × r²', 'd': [
1, 2, 3], 'e': {'a': 246, 'b': 246.91}}
)
json_idx = vl.fields().lookupField('jvalue')
jsonb_idx = vl.fields().lookupField('jbvalue')
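# the same Python value is written to both the json and the jsonb column;
# from the provider's point of view the two types should behave identically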
for attr in attrs:
# Add a new feature
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
self.assertTrue(vl2.startEditing())
f = QgsFeature(vl2.fields())
f.setAttributes([None, attr, attr])
self.assertTrue(vl2.addFeatures([f]))
self.assertTrue(vl2.commitChanges(), attr)
# Read back
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
fid = [f.id() for f in vl2.getFeatures()][-1]
f = vl2.getFeature(fid)
self.assertEqual(f.attributes(), [fid, attr, attr])
# Change attribute values
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
fid = [f.id() for f in vl2.getFeatures()][-1]
self.assertTrue(vl2.startEditing())
self.assertTrue(vl2.changeAttributeValues(
fid, {json_idx: attr, jsonb_idx: attr}))
self.assertTrue(vl2.commitChanges())
# Read back
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
f = vl2.getFeature(fid)
self.assertEqual(f.attributes(), [fid, attr, attr])
# Let's check changeFeatures:
for attr in attrs2:
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
fid = [f.id() for f in vl2.getFeatures()][-1]
self.assertTrue(vl2.startEditing())
self.assertTrue(vl2.dataProvider().changeFeatures({fid: {json_idx: attr, jsonb_idx: attr}}, {}))
self.assertTrue(vl2.commitChanges())
# Read back again
vl2 = QgsVectorLayer('%s table="qgis_test"."json" sql=' % (
self.dbconn), "testjson", "postgres")
f = vl2.getFeature(fid)
self.assertEqual(f.attributes(), [fid, attr, attr])
def testStringArray(self):
vl = QgsVectorLayer('%s table="qgis_test"."string_array" sql=' % (
self.dbconn), "teststringarray", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName(
'value')).type(), QVariant.StringList)
self.assertEqual(fields.at(fields.indexFromName(
'value')).subType(), QVariant.String)
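# a postgres text[] column surfaces as QVariant.StringList with a String
# subtype, i.e. a plain list of str on the Python side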
f = next(vl.getFeatures(QgsFeatureRequest()))
value_idx = vl.fields().lookupField('value')
self.assertIsInstance(f.attributes()[value_idx], list)
self.assertEqual(f.attributes()[value_idx], ['a', 'b', 'c'])
new_f = QgsFeature(vl.fields())
new_f['pk'] = NULL
new_f['value'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
r, fs = vl.dataProvider().addFeatures([new_f])
self.assertTrue(r)
new_pk = fs[0]['pk']
self.assertNotEqual(new_pk, NULL, fs[0].attributes())
try:
read_back = vl.getFeature(new_pk)
self.assertEqual(read_back['pk'], new_pk)
self.assertEqual(read_back['value'], new_f['value'])
finally:
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeatures([new_pk]))
self.assertTrue(vl.commitChanges())
def testIntArray(self):
vl = QgsVectorLayer('%s table="qgis_test"."int_array" sql=' % (
self.dbconn), "testintarray", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(
fields.at(fields.indexFromName('value')).type(), QVariant.List)
self.assertEqual(fields.at(fields.indexFromName(
'value')).subType(), QVariant.Int)
f = next(vl.getFeatures(QgsFeatureRequest()))
value_idx = vl.fields().lookupField('value')
self.assertIsInstance(f.attributes()[value_idx], list)
self.assertEqual(f.attributes()[value_idx], [1, 2, -5])
def testDoubleArray(self):
vl = QgsVectorLayer('%s table="qgis_test"."double_array" sql=' % (
self.dbconn), "testdoublearray", "postgres")
self.assertTrue(vl.isValid())
fields = vl.dataProvider().fields()
self.assertEqual(
fields.at(fields.indexFromName('value')).type(), QVariant.List)
self.assertEqual(fields.at(fields.indexFromName(
'value')).subType(), QVariant.Double)
f = next(vl.getFeatures(QgsFeatureRequest()))
value_idx = vl.fields().lookupField('value')
self.assertIsInstance(f.attributes()[value_idx], list)
self.assertEqual(f.attributes()[value_idx], [1.1, 2, -5.12345])
def testNotNullConstraint(self):
vl = QgsVectorLayer('%s table="qgis_test"."constraints" sql=' % (
self.dbconn), "constraints", "postgres")
self.assertTrue(vl.isValid())
self.assertEqual(len(vl.fields()), 4)
# test some bad field indexes
self.assertEqual(vl.dataProvider().fieldConstraints(-1),
QgsFieldConstraints.Constraints())
self.assertEqual(vl.dataProvider().fieldConstraints(
1001), QgsFieldConstraints.Constraints())
self.assertTrue(vl.dataProvider().fieldConstraints(0) &
QgsFieldConstraints.ConstraintNotNull)
self.assertFalse(vl.dataProvider().fieldConstraints(1)
& QgsFieldConstraints.ConstraintNotNull)
self.assertTrue(vl.dataProvider().fieldConstraints(2) &
QgsFieldConstraints.ConstraintNotNull)
self.assertFalse(vl.dataProvider().fieldConstraints(3)
& QgsFieldConstraints.ConstraintNotNull)
# test that constraints have been saved to fields correctly
fields = vl.fields()
self.assertTrue(fields.at(0).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
self.assertEqual(fields.at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
QgsFieldConstraints.ConstraintOriginProvider)
self.assertFalse(fields.at(1).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
self.assertTrue(fields.at(2).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
self.assertEqual(fields.at(2).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
QgsFieldConstraints.ConstraintOriginProvider)
self.assertFalse(fields.at(3).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
def testUniqueConstraint(self):
vl = QgsVectorLayer('%s table="qgis_test"."constraints" sql=' % (
self.dbconn), "constraints", "postgres")
self.assertTrue(vl.isValid())
self.assertEqual(len(vl.fields()), 4)
# test some bad field indexes
self.assertEqual(vl.dataProvider().fieldConstraints(-1),
QgsFieldConstraints.Constraints())
self.assertEqual(vl.dataProvider().fieldConstraints(
1001), QgsFieldConstraints.Constraints())
self.assertTrue(vl.dataProvider().fieldConstraints(0)
& QgsFieldConstraints.ConstraintUnique)
self.assertTrue(vl.dataProvider().fieldConstraints(1)
& QgsFieldConstraints.ConstraintUnique)
self.assertTrue(vl.dataProvider().fieldConstraints(2)
& QgsFieldConstraints.ConstraintUnique)
self.assertFalse(vl.dataProvider().fieldConstraints(3)
& QgsFieldConstraints.ConstraintUnique)
# test that constraints have been saved to fields correctly
fields = vl.fields()
self.assertTrue(fields.at(0).constraints().constraints()
& QgsFieldConstraints.ConstraintUnique)
self.assertEqual(fields.at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
QgsFieldConstraints.ConstraintOriginProvider)
self.assertTrue(fields.at(1).constraints().constraints()
& QgsFieldConstraints.ConstraintUnique)
self.assertEqual(fields.at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
QgsFieldConstraints.ConstraintOriginProvider)
self.assertTrue(fields.at(2).constraints().constraints()
& QgsFieldConstraints.ConstraintUnique)
self.assertEqual(fields.at(2).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
QgsFieldConstraints.ConstraintOriginProvider)
self.assertFalse(fields.at(3).constraints().constraints()
& QgsFieldConstraints.ConstraintUnique)
def testConstraintOverwrite(self):
""" test that Postgres provider constraints can't be overwritten by vector layer method """
vl = QgsVectorLayer('%s table="qgis_test"."constraints" sql=' % (
self.dbconn), "constraints", "postgres")
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().fieldConstraints(0) &
QgsFieldConstraints.ConstraintNotNull)
self.assertTrue(vl.fields().at(0).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
# add a constraint at the layer level
vl.setFieldConstraint(0, QgsFieldConstraints.ConstraintUnique)
# should be no change at provider level
self.assertTrue(vl.dataProvider().fieldConstraints(0) &
QgsFieldConstraints.ConstraintNotNull)
# but layer should still keep provider constraints...
self.assertTrue(vl.fields().at(0).constraints().constraints()
& QgsFieldConstraints.ConstraintNotNull)
self.assertTrue(vl.fieldConstraints(
0) & QgsFieldConstraints.ConstraintNotNull)
# ...in addition to layer level constraint
self.assertTrue(vl.fields().at(0).constraints(
).constraints() & QgsFieldConstraints.ConstraintUnique)
self.assertTrue(vl.fieldConstraints(
0) & QgsFieldConstraints.ConstraintUnique)
def testVectorLayerUtilsUniqueWithProviderDefault(self):
vl = QgsVectorLayer('%s table="qgis_test"."someData" sql=' %
(self.dbconn), "someData", "postgres")
default_clause = 'nextval(\'qgis_test."someData_pk_seq"\'::regclass)'
vl.dataProvider().setProviderProperty(
QgsDataProvider.EvaluateDefaultValues, False)
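# with EvaluateDefaultValues turned off the provider hands back the raw SQL
# default clause (the nextval() call) instead of evaluating it server-side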
self.assertEqual(
vl.dataProvider().defaultValueClause(0), default_clause)
self.assertTrue(QgsVectorLayerUtils.valueExists(vl, 0, 4))
vl.startEditing()
f = QgsFeature(vl.fields())
f.setAttribute(0, default_clause)
self.assertFalse(
QgsVectorLayerUtils.valueExists(vl, 0, default_clause))
self.assertTrue(vl.addFeatures([f]))
# the default value clause should exist...
self.assertTrue(QgsVectorLayerUtils.valueExists(vl, 0, default_clause))
# but it should not prevent the attribute being validated
self.assertTrue(QgsVectorLayerUtils.validateAttribute(vl, f, 0))
vl.rollBack()
def testSkipConstraintCheck(self):
vl = QgsVectorLayer('%s table="qgis_test"."someData" sql=' %
(self.dbconn), "someData", "postgres")
default_clause = 'nextval(\'qgis_test."someData_pk_seq"\'::regclass)'
vl.dataProvider().setProviderProperty(
QgsDataProvider.EvaluateDefaultValues, False)
self.assertTrue(vl.dataProvider().skipConstraintCheck(
0, QgsFieldConstraints.ConstraintUnique, default_clause))
self.assertFalse(vl.dataProvider().skipConstraintCheck(
0, QgsFieldConstraints.ConstraintUnique, 59))
def testVectorLayerUtilsCreateFeatureWithProviderDefault(self):
vl = QgsVectorLayer('%s table="qgis_test"."someData" sql=' %
(self.dbconn), "someData", "postgres")
default_clause = 'nextval(\'qgis_test."someData_pk_seq"\'::regclass)'
self.assertEqual(
vl.dataProvider().defaultValueClause(0), default_clause)
# If an attribute map is provided, QgsVectorLayerUtils.createFeature must
# respect it, otherwise default values from provider are checked.
# User's choice will not be respected if the value violates unique constraints.
# See https://github.com/qgis/QGIS/issues/27758
f = QgsVectorLayerUtils.createFeature(vl, attributes={1: 5, 3: 'map'})
# changed so that createFeature respects user choice
self.assertEqual(f.attributes(), [
default_clause, 5, "'qgis'::text", 'map', None, None, None, None, None])
vl.setDefaultValueDefinition(3, QgsDefaultValue("'mappy'"))
# the user-supplied value must override the vector layer default value
# expression, which in turn overrides the postgres provider default clause
f = QgsVectorLayerUtils.createFeature(vl, attributes={1: 5, 3: 'map'})
self.assertEqual(f.attributes(), [
default_clause, 5, "'qgis'::text", 'map', None, None, None, None, None])
# since the user did not supply a value for field 3, the layer default value ('mappy') must be used
f = QgsVectorLayerUtils.createFeature(vl, attributes={1: 5})
self.assertEqual(f.attributes(), [
default_clause, 5, "'qgis'::text", 'mappy', None, None, None, None, None])
# See https://github.com/qgis/QGIS/issues/23127
def testNumericPrecision(self):
uri = 'point?field=f1:int'
uri += '&field=f2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 1
f['f2'] = 123.456
f['f3'] = '12345678.90123456789'
lyr.dataProvider().addFeatures([f])
uri = '%s table="qgis_test"."b18155" (g) key=\'f1\'' % (self.dbconn)
self.execSQLCommand('DROP TABLE IF EXISTS qgis_test.b18155')
err = QgsVectorLayerExporter.exportLayer(
lyr, uri, "postgres", lyr.crs())
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(uri, "y", "postgres")
self.assertTrue(lyr.isValid())
f = next(lyr.getFeatures())
self.assertEqual(f['f1'], 1)
self.assertEqual(f['f2'], 123.456)
self.assertEqual(f['f3'], '12345678.90123456789')
# See https://github.com/qgis/QGIS/issues/23163
def testImportKey(self):
uri = 'point?field=f1:int'
uri += '&field=F2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
def testKey(lyr, key, kfnames):
self.execSQLCommand('DROP TABLE IF EXISTS qgis_test.import_test')
uri = '%s table="qgis_test"."import_test" (g)' % self.dbconn
if key is not None:
uri += ' key=\'%s\'' % key
err = QgsVectorLayerExporter.exportLayer(
lyr, uri, "postgres", lyr.crs())
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
olyr = QgsVectorLayer(uri, "y", "postgres")
self.assertTrue(olyr.isValid())
flds = lyr.fields()
oflds = olyr.fields()
if key is None:
# if no key was given, the exporter creates one
self.assertEqual(oflds.size(), flds.size() + 1)
self.assertEqual(oflds[0].name(), kfnames[0])
for i in range(flds.size()):
self.assertEqual(oflds[i + 1].name(), flds[i].name())
else:
# pkey was given, no extra field generated
self.assertEqual(oflds.size(), flds.size())
for i in range(oflds.size()):
self.assertEqual(oflds[i].name(), flds[i].name())
pks = olyr.primaryKeyAttributes()
self.assertEqual(len(pks), len(kfnames))
for i in range(0, len(kfnames)):
self.assertEqual(oflds[pks[i]].name(), kfnames[i])
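# key accepts bare or double-quoted column names; a comma-separated quoted
# list declares a composite key, and passing None lets the exporter add one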
testKey(lyr, 'f1', ['f1'])
testKey(lyr, '"f1"', ['f1'])
testKey(lyr, '"f1","F2"', ['f1', 'F2'])
testKey(lyr, '"f1","F2","f3"', ['f1', 'F2', 'f3'])
testKey(lyr, None, ['id'])
# See https://github.com/qgis/QGIS/issues/25415
def testImportWithoutSchema(self):
def _test(table, schema=None):
self.execSQLCommand('DROP TABLE IF EXISTS %s CASCADE' % table)
uri = 'point?field=f1:int'
uri += '&field=F2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
table = ("%s" % table) if schema is None else (
"\"%s\".\"%s\"" % (schema, table))
dest_uri = "%s sslmode=disable table=%s (geom) sql" % (
self.dbconn, table)
QgsVectorLayerExporter.exportLayer(
lyr, dest_uri, "postgres", lyr.crs())
olyr = QgsVectorLayer(dest_uri, "y", "postgres")
self.assertTrue(olyr.isValid(), "Failed URI: %s" % dest_uri)
# Test bug 17518
_test('b17518')
# Test fully qualified table (with schema)
_test("b17518", "qgis_test")
# Test empty schema
_test("b17518", "")
# Test public schema
_test("b17518", "public")
# Test fully qualified table (with wrong schema)
with self.assertRaises(AssertionError):
_test("b17518", "qgis_test_wrong")
def testStyle(self):
self.execSQLCommand('DROP TABLE IF EXISTS layer_styles CASCADE')
vl = self.getEditableLayer()
self.assertTrue(vl.isValid())
self.assertTrue(
vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
self.assertTrue(vl.dataProvider().isDeleteStyleFromDatabaseSupported())
# table layer_styles does not exist yet
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, -1)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
mFilePath = QDir.toNativeSeparators(
'%s/symbol_layer/%s.qml' % (unitTestDataPath(), "singleSymbol"))
status = vl.loadNamedStyle(mFilePath)
self.assertTrue(status)
# The style is saved as non-default
errorMsg = vl.saveStyleToDatabase(
"by day", "faded greens and elegant patterns", False, "")
self.assertEqual(errorMsg, "")
# the style id should be "1", not "by day"
qml, errmsg = vl.getStyleFromDatabase("by day")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ["1"])
self.assertEqual(namelist, ["by day"])
self.assertEqual(desclist, ["faded greens and elegant patterns"])
qml, errmsg = vl.getStyleFromDatabase("100")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertTrue(qml.startswith('<!DOCTYPE qgis'), qml)
self.assertEqual(errmsg, "")
res, errmsg = vl.deleteStyleFromDatabase("100")
self.assertTrue(res)
self.assertEqual(errmsg, "")
res, errmsg = vl.deleteStyleFromDatabase("1")
self.assertTrue(res)
self.assertEqual(errmsg, "")
# We save now the style again twice but with one as default
errorMsg = vl.saveStyleToDatabase(
"related style", "faded greens and elegant patterns", False, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase(
"default style", "faded greens and elegant patterns", True, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 2)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ["3", "2"]) # Ids must be reversed.
self.assertEqual(namelist, ["default style", "related style"])
self.assertEqual(desclist, ["faded greens and elegant patterns"] * 2)
# We remove these 2 styles
res, errmsg = vl.deleteStyleFromDatabase("2")
self.assertTrue(res)
self.assertEqual(errmsg, "")
res, errmsg = vl.deleteStyleFromDatabase("3")
self.assertTrue(res)
self.assertEqual(errmsg, "")
# table layer_styles does exist, but is now empty
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 0)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertEqual(errmsg, "")
def testStyleWithGeometryType(self):
"""Test saving styles with the additional geometry type
Layers are created from geometries_table
"""
myconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
myconn = os.environ['QGIS_PGTEST_DB']
# point layer
myPoint = QgsVectorLayer(
myconn +
' sslmode=disable srid=4326 type=POINT table="qgis_test"."geometries_table" (geom) sql=', 'Point',
'postgres')
self.assertTrue(myPoint.isValid())
myPoint.saveStyleToDatabase('myPointStyle', '', False, '')
# polygon layer
myPolygon = QgsVectorLayer(
myconn +
' sslmode=disable srid=4326 type=POLYGON table="qgis_test"."geometries_table" (geom) sql=', 'Poly',
'postgres')
self.assertTrue(myPolygon.isValid())
myPolygon.saveStyleToDatabase('myPolygonStyle', '', False, '')
# how many
related_count, idlist, namelist, desclist, errmsg = myPolygon.listStylesInDatabase()
self.assertEqual(len(idlist), 2)
self.assertEqual(namelist, ['myPolygonStyle', 'myPointStyle'])
# raw psycopg2 query
self.assertTrue(self.con)
cur = self.con.cursor()
self.assertTrue(cur)
cur.execute("select stylename, type from layer_styles order by type")
self.assertEqual(cur.fetchall(), [
('myPointStyle', 'Point'), ('myPolygonStyle', 'Polygon')])
cur.close()
# delete them
myPolygon.deleteStyleFromDatabase(idlist[1])
myPolygon.deleteStyleFromDatabase(idlist[0])
styles = myPolygon.listStylesInDatabase()
ids = styles[1]
self.assertEqual(len(ids), 0)
def testSaveStyleInvalidXML(self):
self.execSQLCommand('DROP TABLE IF EXISTS layer_styles CASCADE')
vl = self.getEditableLayer()
self.assertTrue(vl.isValid())
self.assertTrue(
vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
self.assertTrue(vl.dataProvider().isDeleteStyleFromDatabaseSupported())
mFilePath = QDir.toNativeSeparators(
'%s/symbol_layer/%s.qml' % (unitTestDataPath(), "fontSymbol"))
status = vl.loadNamedStyle(mFilePath)
self.assertTrue(status)
errorMsg = vl.saveStyleToDatabase(
"fontSymbol", "font with invalid utf8 char", False, "")
self.assertEqual(errorMsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertTrue('v="\u001E"' in qml)
self.assertEqual(errmsg, "")
# Test loadStyle from metadata
md = QgsProviderRegistry.instance().providerMetadata('postgres')
qml = md.loadStyle(self.dbconn + " type=POINT table=\"qgis_test\".\"editData\" (geom)", 'fontSymbol')
self.assertTrue(qml.startswith('<!DOCTYPE qgis'), qml)
self.assertIn('v="\u001E"', qml)
def testHasMetadata(self):
# views don't have metadata
vl = QgsVectorLayer('{} table="qgis_test"."{}" key="pk" sql='.format(self.dbconn, 'bikes_view'), "bikes_view",
"postgres")
self.assertTrue(vl.isValid())
self.assertFalse(vl.dataProvider().hasMetadata())
# ordinary tables have metadata
vl = QgsVectorLayer('%s table="qgis_test"."someData" sql=' %
(self.dbconn), "someData", "postgres")
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().hasMetadata())
def testReadExtentOnView(self):
# vector layer based on view
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data_view" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl0.isValid())
self.assertFalse(vl0.dataProvider().hasMetadata())
# set a custom extent
originalExtent = vl0.extent()
customExtent = QgsRectangle(-80, 80, -70, 90)
vl0.setExtent(customExtent)
# write xml
doc = QDomDocument("testdoc")
elem = doc.createElement("maplayer")
self.assertTrue(vl0.writeLayerXml(elem, doc, QgsReadWriteContext()))
# read xml with the custom extent. It should not be used by default
vl1 = QgsVectorLayer()
vl1.readLayerXml(elem, QgsReadWriteContext())
self.assertTrue(vl1.isValid())
self.assertEqual(vl1.extent(), originalExtent)
# read xml with custom extent with readExtent option. Extent read from
# xml document should be used because we have a view
vl2 = QgsVectorLayer()
vl2.setReadExtentFromXml(True)
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertTrue(vl2.isValid())
self.assertEqual(vl2.extent(), customExtent)
# but a forced extent update should allow retrieving the data
# provider extent
vl2.updateExtents()
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertEqual(vl2.extent(), customExtent)
vl2.updateExtents(force=True)
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertEqual(vl2.extent(), originalExtent)
def testReadExtentOnTable(self):
# vector layer based on a standard table
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl0.isValid())
self.assertTrue(vl0.dataProvider().hasMetadata())
# set a custom extent
originalExtent = vl0.extent()
customExtent = QgsRectangle(-80, 80, -70, 90)
vl0.setExtent(customExtent)
# write xml
doc = QDomDocument("testdoc")
elem = doc.createElement("maplayer")
self.assertTrue(vl0.writeLayerXml(elem, doc, QgsReadWriteContext()))
# read xml with the custom extent. It should not be used by default
vl1 = QgsVectorLayer()
vl1.readLayerXml(elem, QgsReadWriteContext())
self.assertTrue(vl1.isValid())
self.assertEqual(vl1.extent(), originalExtent)
# read xml with custom extent with readExtent option. Extent read from
# xml document should be used even if we don't have a view or a
# materialized view
vl2 = QgsVectorLayer()
vl2.setReadExtentFromXml(True)
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertTrue(vl2.isValid())
self.assertEqual(vl2.extent(), customExtent)
# but a forced extent update should allow retrieving the data
# provider extent
vl2.updateExtents()
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertEqual(vl2.extent(), customExtent)
vl2.updateExtents(force=True)
vl2.readLayerXml(elem, QgsReadWriteContext())
self.assertEqual(vl2.extent(), originalExtent)
def testDeterminePkey(self):
"""Test primary key auto-determination"""
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable srid=4326 type=POLYGON table="qgis_test"."authors" sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().hasMetadata())
self.assertTrue("key='pk'" in vl.source())
def testCheckPkUnicityOnView(self):
# vector layer based on view
# This is valid
vl0 = QgsVectorLayer(
self.dbconn +
' checkPrimaryKeyUnicity=\'0\' sslmode=disable key=\'pk\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl0.isValid())
geom = vl0.getFeature(1).geometry().asWkt()
# This is NOT valid
vl0 = QgsVectorLayer(
self.dbconn +
' checkPrimaryKeyUnicity=\'1\' sslmode=disable key=\'an_int\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres')
self.assertFalse(vl0.isValid())
# This is NOT valid because the default is to check unicity
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'an_int\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres')
self.assertFalse(vl0.isValid())
# This is valid because the readExtentFromXml option is set
# loadDefaultStyle, readExtentFromXml
options = QgsVectorLayer.LayerOptions(True, True)
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'an_int\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres', options)
self.assertTrue(vl0.isValid())
# Valid because a_unique_int is unique and default is to check unicity
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'a_unique_int\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres')
self.assertEqual(vl0.getFeature(1).geometry().asWkt(), geom)
# Valid because a_unique_int is unique
vl0 = QgsVectorLayer(
self.dbconn +
' checkPrimaryKeyUnicity=\'1\' sslmode=disable key=\'a_unique_int\' srid=0 type=POINT table="qgis_test"."b21839_pk_unicity_view" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl0.isValid())
self.assertEqual(vl0.getFeature(1).geometry().asWkt(), geom)
def testNotify(self):
vl0 = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="qgis_test"."some_poly_data" (geom) sql=',
'test', 'postgres')
vl0.dataProvider().setListening(True)
class Notified(QObject):
def __init__(self):
super(Notified, self).__init__()
self.received = ""
def receive(self, msg):
self.received = msg
notified = Notified()
vl0.dataProvider().notify.connect(notified.receive)
vl0.dataProvider().setListening(True)
cur = self.con.cursor()
ok = False
start = time.time()
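# NOTIFY delivery is asynchronous, so keep re-sending and pumping the Qt
# event loop until the payload arrives or the 5 second timeout expires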
while True:
cur.execute("NOTIFY qgis, 'my message'")
self.con.commit()
QGISAPP.processEvents()
if notified.received == "my message":
ok = True
break
if (time.time() - start) > 5: # timeout
break
vl0.dataProvider().notify.disconnect(notified.receive)
vl0.dataProvider().setListening(False)
self.assertTrue(ok)
def testStyleDatabaseWithService(self):
"""Test saving style in DB using a service file.
To run this test, you first need to set up the test
database with tests/testdata/provider/testdata_pg.sh
"""
myconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
myconn = os.environ['QGIS_PGTEST_DB']
myvl = QgsVectorLayer(
myconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."someData" (geom) sql=',
'test', 'postgres')
styles = myvl.listStylesInDatabase()
ids = styles[1]
self.assertEqual(len(ids), 0)
myvl.saveStyleToDatabase('mystyle', '', False, '')
styles = myvl.listStylesInDatabase()
ids = styles[1]
self.assertEqual(len(ids), 1)
myvl.deleteStyleFromDatabase(ids[0])
styles = myvl.listStylesInDatabase()
ids = styles[1]
self.assertEqual(len(ids), 0)
def testCurveToMultipolygon(self):
self.execSQLCommand(
'CREATE TABLE IF NOT EXISTS multicurve(pk SERIAL NOT NULL PRIMARY KEY, geom public.geometry(MultiPolygon, 4326))')
self.execSQLCommand('TRUNCATE multicurve')
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=MULTIPOLYGON table="multicurve" (geom) sql=',
'test', 'postgres')
f = QgsFeature(vl.fields())
f.setGeometry(QgsGeometry.fromWkt(
'CurvePolygon(CircularString (20 30, 50 30, 50 90, 10 50, 20 30))'))
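# the layer is declared as MULTIPOLYGON, so on commit the provider is
# expected to segmentize the curve and promote it to a multi type; the
# vertexCount check below confirms the arc was densified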
self.assertTrue(vl.startEditing())
self.assertTrue(vl.addFeatures([f]))
self.assertTrue(vl.commitChanges())
f = next(vl.getFeatures(QgsFeatureRequest()))
g = f.geometry().constGet()
self.assertTrue(g)
self.assertEqual(g.wkbType(), QgsWkbTypes.MultiPolygon)
self.assertEqual(g.childCount(), 1)
self.assertGreater(g.childGeometry(0).vertexCount(), 3)
def testMassivePaste(self):
"""Speed test to compare createFeature and createFeatures, for regression #21303"""
self.execSQLCommand(
'CREATE TABLE IF NOT EXISTS massive_paste(pk SERIAL NOT NULL PRIMARY KEY, geom public.geometry(Polygon, 4326))')
self.execSQLCommand('TRUNCATE massive_paste')
start_time = time.time()
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="massive_paste" (geom) sql=',
'test_massive_paste', 'postgres')
self.assertTrue(vl.startEditing())
features = []
context = vl.createExpressionContext()
for i in range(4000):
features.append(
QgsVectorLayerUtils.createFeature(vl, QgsGeometry.fromWkt('Polygon ((7 44, 8 45, 8 46, 7 46, 7 44))'),
{0: i}, context))
self.assertTrue(vl.addFeatures(features))
self.assertTrue(vl.commitChanges())
self.assertEqual(vl.featureCount(), 4000)
print("--- %s seconds ---" % (time.time() - start_time))
self.execSQLCommand('TRUNCATE massive_paste')
start_time = time.time()
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="massive_paste" (geom) sql=',
'test_massive_paste', 'postgres')
self.assertTrue(vl.startEditing())
features_data = []
context = vl.createExpressionContext()
for i in range(4000):
features_data.append(
QgsVectorLayerUtils.QgsFeatureData(QgsGeometry.fromWkt('Polygon ((7 44, 8 45, 8 46, 7 46, 7 44))'),
{0: i}))
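# createFeatures() resolves defaults and unique-constraint checks for the
# whole batch at once, which is presumably what makes it faster than the
# per-feature createFeature() loop timed above (see #21303)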
features = QgsVectorLayerUtils.createFeatures(
vl, features_data, context)
self.assertTrue(vl.addFeatures(features))
self.assertTrue(vl.commitChanges())
self.assertEqual(vl.featureCount(), 4000)
print("--- %s seconds ---" % (time.time() - start_time))
def testFilterOnCustomBbox(self):
extent = QgsRectangle(-68, 70, -67, 80)
request = QgsFeatureRequest().setFilterRect(extent)
dbconn = 'service=qgis_test'
uri = '%s srid=4326 key="pk" sslmode=disable table="qgis_test"."some_poly_data_shift_bbox" (geom)' % (
dbconn)
def _test(vl, ids):
values = {feat['pk']: 'x' for feat in vl.getFeatures(request)}
expected = {x: 'x' for x in ids}
self.assertEqual(values, expected)
vl = QgsVectorLayer(uri, "testgeom", "postgres")
self.assertTrue(vl.isValid())
_test(vl, [2, 3])
vl = QgsVectorLayer(uri + ' bbox=shiftbox', "testgeom", "postgres")
self.assertTrue(vl.isValid())
_test(vl, [1, 3])
def testValidLayerDiscoverRelationsNone(self):
"""
Test that the discover relations feature can be used on a layer that has no relations.
"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."someData" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertEqual(vl.dataProvider().discoverRelations(vl, []), [])
def testInvalidLayerDiscoverRelations(self):
"""
Test that the discover relations feature can be used on an invalid layer.
"""
vl = QgsVectorLayer('{} table="qgis_test"."invalid_layer" sql='.format(self.dbconn), "invalid_layer",
"postgres")
self.assertFalse(vl.isValid())
self.assertEqual(vl.dataProvider().discoverRelations(vl, []), [])
def testValidLayerDiscoverRelations(self):
"""
Test implicit relations that can be discovered between tables, based on declared foreign keys.
The test also checks that two distinct relations can be discovered when two foreign keys are declared (see #41138).
"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' checkPrimaryKeyUnicity=\'1\' table="qgis_test"."referencing_layer"',
'referencing_layer', 'postgres')
vls = [
QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk_ref_1\' checkPrimaryKeyUnicity=\'1\' table="qgis_test"."referenced_layer_1"',
'referenced_layer_1', 'postgres'),
QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk_ref_2\' checkPrimaryKeyUnicity=\'1\' table="qgis_test"."referenced_layer_2"',
'referenced_layer_2', 'postgres'),
vl
]
for lyr in vls:
self.assertTrue(lyr.isValid())
QgsProject.instance().addMapLayer(lyr)
relations = vl.dataProvider().discoverRelations(vl, vls)
self.assertEqual(len(relations), 2)
for i, r in enumerate(relations):
self.assertEqual(r.referencedLayer(), vls[i])
def testCheckTidPkOnViews(self):
"""Test vector layer based on a view with `ctid` as a key"""
# This is valid
vl0 = QgsVectorLayer(
self.dbconn +
' checkPrimaryKeyUnicity=\'0\' sslmode=disable key=\'ctid\' srid=4326 type=POINT table="qgis_test"."b31799_test_view_ctid" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl0.isValid())
self.assertEqual(vl0.featureCount(), 10)
for f in vl0.getFeatures():
self.assertNotEqual(f.attribute(0), NULL)
def testFeatureCountEstimatedOnTable(self):
"""
Test feature count on table when estimated data is enabled
"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' estimatedmetadata=true srid=4326 type=POINT table="qgis_test"."someData" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertGreater(vl.featureCount(), 0)
def testFeatureCountEstimatedOnView(self):
"""
Test feature count on view when estimated data is enabled
"""
self.execSQLCommand('DROP VIEW IF EXISTS qgis_test.somedataview')
self.execSQLCommand(
'CREATE VIEW qgis_test.somedataview AS SELECT * FROM qgis_test."someData"')
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' estimatedmetadata=true srid=4326 type=POINT table="qgis_test"."somedataview" (geom) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertGreater(vl.featureCount(), 0)
def testIdentityPk(self):
"""Test a table with identity pk, see GH #29560"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'gid\' srid=4326 type=POLYGON table="qgis_test"."b29560"(geom) sql=',
'testb29560', 'postgres')
self.assertTrue(vl.isValid())
feature = QgsFeature(vl.fields())
geom = QgsGeometry.fromWkt('POLYGON EMPTY')
feature.setGeometry(geom)
self.assertTrue(vl.dataProvider().addFeature(feature))
self.assertEqual(vl.dataProvider().defaultValueClause(0), "nextval('b29560_gid_seq'::regclass)")
del vl
# Verify
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'gid\' srid=4326 type=POLYGON table="qgis_test"."b29560"(geom) sql=',
'testb29560', 'postgres')
self.assertTrue(vl.isValid())
feature = next(vl.getFeatures())
self.assertIsNotNone(feature.id())
@unittest.skipIf(os.environ.get('QGIS_CONTINUOUS_INTEGRATION_RUN', 'true') == 'true', 'Test flaky')
def testDefaultValuesAndClauses(self):
"""Test whether default values like CURRENT_TIMESTAMP or
now() are respected. See GH #33383"""
# Create the test table
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable table="public"."test_table_default_values" sql=', 'test',
'postgres')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
# Clean the table
dp.deleteFeatures(dp.allFeatureIds())
# Save it for the test
now = datetime.now()
# Test default values
dp.setProviderProperty(QgsDataProvider.EvaluateDefaultValues, 1)
# FIXME: spatialite provider (and OGR) return a NULL here and the following passes
# self.assertTrue(dp.defaultValue(0).isNull())
self.assertIsNotNone(dp.defaultValue(0))
self.assertIsNone(dp.defaultValue(1))
self.assertTrue(dp.defaultValue(
2).startswith(now.strftime('%Y-%m-%d')))
self.assertTrue(dp.defaultValue(
3).startswith(now.strftime('%Y-%m-%d')))
self.assertEqual(dp.defaultValue(4), 123)
self.assertEqual(dp.defaultValue(5), 'My default')
# FIXME: the provider should return the clause definition
# regardless of the EvaluateDefaultValues setting
dp.setProviderProperty(QgsDataProvider.EvaluateDefaultValues, 0)
self.assertEqual(dp.defaultValueClause(
0), "nextval('test_table_default_values_id_seq'::regclass)")
self.assertEqual(dp.defaultValueClause(1), '')
self.assertEqual(dp.defaultValueClause(2), "now()")
self.assertEqual(dp.defaultValueClause(3), "CURRENT_TIMESTAMP")
self.assertEqual(dp.defaultValueClause(4), '123')
self.assertEqual(dp.defaultValueClause(5), "'My default'::text")
# FIXME: the test fails if the value is not reset to 1
dp.setProviderProperty(QgsDataProvider.EvaluateDefaultValues, 1)
feature = QgsFeature(vl.fields())
for idx in range(vl.fields().count()):
default = vl.dataProvider().defaultValue(idx)
if default is not None:
feature.setAttribute(idx, default)
else:
feature.setAttribute(idx, 'A comment')
self.assertTrue(vl.dataProvider().addFeature(feature))
del vl
# Verify
vl2 = QgsVectorLayer(self.dbconn + ' sslmode=disable table="public"."test_table_default_values" sql=', 'test',
'postgres')
self.assertTrue(vl2.isValid())
feature = next(vl2.getFeatures())
self.assertEqual(feature.attribute(1), 'A comment')
self.assertTrue(feature.attribute(
2).startswith(now.strftime('%Y-%m-%d')))
self.assertTrue(feature.attribute(
3).startswith(now.strftime('%Y-%m-%d')))
self.assertEqual(feature.attribute(4), 123)
self.assertEqual(feature.attribute(5), 'My default')
def testEncodeDecodeUri(self):
"""Test PG encode/decode URI"""
md = QgsProviderRegistry.instance().providerMetadata('postgres')
self.assertEqual(md.decodeUri(
'dbname=\'qgis_tests\' host=localhost port=5432 user=\'myuser\' sslmode=disable estimatedmetadata=true srid=3067 table="public"."basic_map_tiled" (rast)'),
{'dbname': 'qgis_tests',
'estimatedmetadata': True,
'geometrycolumn': 'rast',
'host': 'localhost',
'port': '5432',
'schema': 'public',
'srid': '3067',
'sslmode': 1,
'table': 'basic_map_tiled',
'username': 'myuser'})
self.assertEqual(md.decodeUri(
'dbname=\'qgis_tests\' host=localhost port=5432 user=\'myuser\' sslmode=disable key=\'id\' estimatedmetadata=true srid=3763 type=MultiPolygon checkPrimaryKeyUnicity=\'1\' table="public"."copas1" (geom)'),
{'dbname': 'qgis_tests',
'estimatedmetadata': True,
'geometrycolumn': 'geom',
'host': 'localhost',
'key': 'id',
'port': '5432',
'schema': 'public',
'srid': '3763',
'sslmode': 1,
'table': 'copas1',
'type': 6,
'username': 'myuser'})
self.assertEqual(md.encodeUri({'dbname': 'qgis_tests',
'estimatedmetadata': True,
'geometrycolumn': 'geom',
'host': 'localhost',
'key': 'id',
'port': '5432',
'schema': 'public',
'srid': '3763',
'sslmode': 1,
'table': 'copas1',
'type': 6,
'username': 'myuser'}),
"dbname='qgis_tests' user='myuser' srid=3763 estimatedmetadata='true' host='localhost' key='id' port='5432' sslmode='disable' type='MultiPolygon' table=\"public\".\"copas1\" (geom)")
self.assertEqual(md.encodeUri({'dbname': 'qgis_tests',
'estimatedmetadata': True,
'geometrycolumn': 'rast',
'host': 'localhost',
'port': '5432',
'schema': 'public',
'srid': '3067',
'sslmode': 1,
'table': 'basic_map_tiled',
'username': 'myuser'}),
"dbname='qgis_tests' user='myuser' srid=3067 estimatedmetadata='true' host='localhost' port='5432' sslmode='disable' table=\"public\".\"basic_map_tiled\" (rast)")
def _round_trip(uri):
decoded = md.decodeUri(uri)
self.assertEqual(decoded, md.decodeUri(md.encodeUri(decoded)))
uri = self.dbconn + \
' sslmode=disable key=\'gid\' srid=3035 table="public"."my_pg_vector" sql='
decoded = md.decodeUri(uri)
self.assertEqual(decoded, {
'key': 'gid',
'schema': 'public',
'service': 'qgis_test',
'srid': '3035',
'sslmode': QgsDataSourceUri.SslDisable,
'table': 'my_pg_vector',
})
_round_trip(uri)
uri = self.dbconn + \
' sslmode=prefer key=\'gid\' srid=3035 temporalFieldIndex=2 ' + \
'authcfg=afebeff username=\'my username\' password=\'my secret password=\' ' + \
'table="public"."my_pg_vector" (the_geom) sql="a_field" != 1223223'
_round_trip(uri)
decoded = md.decodeUri(uri)
self.assertEqual(decoded, {
'authcfg': 'afebeff',
'geometrycolumn': 'the_geom',
'key': 'gid',
'password': 'my secret password=',
'schema': 'public',
'service': 'qgis_test',
'sql': '"a_field" != 1223223',
'srid': '3035',
'sslmode': QgsDataSourceUri.SslPrefer,
'table': 'my_pg_vector',
'username': 'my username',
})
def testHasSpatialIndex(self):
for layer_name in ('hspi_table', 'hspi_materialized_view'):
columns = {'geom_without_index': QgsFeatureSource.SpatialIndexNotPresent, 'geom_with_index': QgsFeatureSource.SpatialIndexPresent}
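# hasSpatialIndex() is reported per geometry column, so the same relation
# is opened once per column with the opposite expectation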
for (geometry_column, spatial_index) in columns.items():
conn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
conn = os.environ['QGIS_PGTEST_DB']
vl = QgsVectorLayer(
conn +
' sslmode=disable key=\'id\' srid=4326 type=\'Polygon\' table="qgis_test"."{n}" ({c}) sql='.format(n=layer_name, c=geometry_column),
'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertEqual(vl.hasSpatialIndex(), spatial_index)
def testBBoxFilterOnGeographyType(self):
"""Test bounding box filter on geography type"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."testgeog" (geog) sql=',
'test', 'postgres')
self.assertTrue(vl.isValid())
def _test(vl, extent, ids):
request = QgsFeatureRequest().setFilterRect(extent)
values = {feat['pk']: 'x' for feat in vl.getFeatures(request)}
expected = {x: 'x' for x in ids}
self.assertEqual(values, expected)
_test(vl, QgsRectangle(40 - 0.01, -0.01, 40 + 0.01, 0.01), [1])
_test(vl, QgsRectangle(40 - 5, -5, 40 + 5, 5), [1])
_test(vl, QgsRectangle(40 - 5, 0, 40 + 5, 5), [1])
_test(vl, QgsRectangle(40 - 10, -10, 40 + 10, 10), [1]) # no use of spatial index currently
_test(vl, QgsRectangle(40 - 5, 0.01, 40 + 5, 5), []) # no match
_test(vl, QgsRectangle(40 - 0.01, 60 - 0.01, 40 + 0.01, 60 + 0.01), [2])
_test(vl, QgsRectangle(40 - 5, 60 - 5, 40 + 5, 60 + 5), [2])
_test(vl, QgsRectangle(40 - 5, 60 - 0.01, 40 + 5, 60 + 9.99), [2])
_test(vl, QgsRectangle(40 - 0.01, -60 - 0.01, 40 + 0.01, -60 + 0.01), [3])
_test(vl, QgsRectangle(40 - 5, -60 - 5, 40 + 5, -60 + 5), [3])
_test(vl, QgsRectangle(40 - 5, -60 - 9.99, 40 + 5, -60 + 0.01), [3])
_test(vl, QgsRectangle(-181, -90, 181, 90), [1, 2, 3]) # no use of spatial index currently
def testReadCustomSRID(self):
"""Test that we can correctly read the SRS from a custom SRID"""
md = QgsProviderRegistry.instance().providerMetadata("postgres")
conn = md.createConnection(self.dbconn, {})
# Cleanup if needed
try:
conn.dropVectorTable('qgis_test', 'test_custom_srid')
except QgsProviderConnectionException:
pass
conn.executeSql("DELETE FROM spatial_ref_sys WHERE srid = 543210 AND auth_name='FOO' AND auth_srid=32600;")
conn.executeSql("""INSERT INTO spatial_ref_sys (srid, auth_name, auth_srid, srtext, proj4text) VALUES (543210, 'FOO', 32600, 'PROJCS["my_projection",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]','+proj=tmerc +lat_0=0 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs');""")
conn.executeSql('''
CREATE TABLE "qgis_test"."test_custom_srid" (
gid serial primary key,
geom geometry(Point, 543210)
);''')
layer = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\'table="qgis_test"."test_custom_srid" (geom) sql=', 'test', 'postgres')
conn.executeSql("DELETE FROM spatial_ref_sys WHERE srid = 543210 AND auth_name='FOO' AND auth_srid=32600;")
self.assertTrue(layer.isValid())
self.assertEqual(layer.crs().description(), 'my_projection')
def testSingleMultiColumnPkSmallData(self):
"""Test Single and Multi Column PK, `Small` Data"""
from itertools import combinations
def test_for_pk_combinations(test_type_list, pk_column_name_list, fids_get_count):
pk_column_name = ','.join(pk_column_name_list)
set_new_pk = '''
ALTER TABLE qgis_test.multi_column_pk_small_data_table DROP CONSTRAINT multi_column_pk_small_data_pk;
ALTER TABLE qgis_test.multi_column_pk_small_data_table
ADD CONSTRAINT multi_column_pk_small_data_pk PRIMARY KEY ({});'''
set_new_layer = ' sslmode=disable key=\'{}\' srid=3857 type=POLYGON table="qgis_test"."multi_column_pk_small_data_{}" (geom) sql='
error_string = 'from {} with PK - {} : expected {}, got {}'
if 'table' in test_type_list:
self.execSQLCommand(set_new_pk.format(pk_column_name))
for test_type in test_type_list:
vl = QgsVectorLayer(self.dbconn + set_new_layer.format(pk_column_name, test_type), 'test_multi_column_pk_small_data', 'postgres')
fids = [f.id() for f in vl.getFeatures(QgsFeatureRequest().setLimit(fids_get_count))]
fids2 = [f.id() for f in vl.getFeatures(fids)]
self.assertEqual(fids_get_count, len(fids), "Get with limit " +
error_string.format(test_type, pk_column_name, fids_get_count, len(fids)))
self.assertEqual(fids_get_count, len(fids2), "Get by fids " +
error_string.format(test_type, pk_column_name, fids_get_count, len(fids2)))
self.execSQLCommand('DROP TABLE IF EXISTS qgis_test.multi_column_pk_small_data_table CASCADE;')
self.execSQLCommand('''
CREATE TABLE qgis_test.multi_column_pk_small_data_table (
id_serial serial NOT NULL,
id_uuid uuid NOT NULL,
id_int int NOT NULL,
id_bigint bigint NOT NULL,
id_str character varying(20) NOT NULL,
id_inet4 inet NOT NULL,
id_inet6 inet NOT NULL,
id_cidr4 cidr NOT NULL,
id_cidr6 cidr NOT NULL,
id_macaddr macaddr NOT NULL,
id_macaddr8 macaddr8 NOT NULL,
id_timestamp timestamp with time zone NOT NULL,
id_half_null_uuid uuid,
id_all_null_uuid uuid,
geom geometry(Polygon,3857),
CONSTRAINT multi_column_pk_small_data_pk
PRIMARY KEY (id_serial, id_uuid, id_int, id_bigint, id_str) );''')
self.execSQLCommand('''
CREATE OR REPLACE VIEW qgis_test.multi_column_pk_small_data_view AS
SELECT * FROM qgis_test.multi_column_pk_small_data_table;
DROP MATERIALIZED VIEW IF EXISTS qgis_test.multi_column_pk_small_data_mat_view;
CREATE MATERIALIZED VIEW qgis_test.multi_column_pk_small_data_mat_view AS
SELECT * FROM qgis_test.multi_column_pk_small_data_table;''')
self.execSQLCommand('''
TRUNCATE qgis_test.multi_column_pk_small_data_table;
INSERT INTO qgis_test.multi_column_pk_small_data_table(
id_uuid, id_int, id_bigint, id_str, id_inet4, id_inet6, id_cidr4, id_cidr6,
id_macaddr, id_macaddr8, id_timestamp, id_half_null_uuid, id_all_null_uuid, geom)
SELECT
( (10000000)::text || (100000000000 + dy)::text || (100000000000 + dx)::text )::uuid,
dx + 1000000 * dy, --id_int
dx + 1000000 * dy, --id_bigint
dx || E\' ot\\'her \' || dy, --id_str
(\'192.168.0.1\'::inet + dx + 100 * dy )::inet, --id_inet4
(\'2001:4f8:3:ba:2e0:81ff:fe22:d1f1\'::inet + dx + 100 * dy )::inet, --id_inet6
(\'192.168.0.1\'::cidr + dx + 100 * dy )::cidr, --id_cidr4
(\'2001:4f8:3:ba:2e0:81ff:fe22:d1f1\'::cidr + dx + 100 * dy )::cidr, --id_cidr6
((112233445566 + dx + 100 * dy)::text)::macaddr, --id_macaddr
((1122334455667788 + dx + 100 * dy)::text)::macaddr8, --id_macaddr8
now() - ((dx||\' hour\')::text)::interval - ((dy||\' day\')::text)::interval,
NULLIF( ( (10000000)::text || (100000000000 + dy)::text || (100000000000 + dx)::text )::uuid,
( (10000000)::text || (100000000000 + dy + dy%2)::text || (100000000000 + dx)::text )::uuid ),
NULL,
ST_Translate(
ST_GeomFromText(\'POLYGON((3396900.0 6521800.0,3396900.0 6521870.0,
3396830.0 6521870.0,3396830.0 6521800.0,3396900.0 6521800.0))\', 3857 ),
100.0 * dx,
100.0 * dy )
FROM generate_series(1,3) dx, generate_series(1,3) dy;
REFRESH MATERIALIZED VIEW qgis_test.multi_column_pk_small_data_mat_view;''')
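# generate_series(1,3) x generate_series(1,3) yields 9 rows; fetching 7 of
# them exercises both the limited iterator and fid lookups without
# exhausting the table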
pk_col_list = ("id_serial", "id_uuid", "id_int", "id_bigint", "id_str", "id_inet4", "id_inet6", "id_cidr4", "id_cidr6", "id_macaddr", "id_macaddr8")
test_type_list = ["table", "view", "mat_view"]
for n in [1, 2, len(pk_col_list)]:
pk_col_set_list = list(combinations(pk_col_list, n))
for pk_col_set in pk_col_set_list:
test_for_pk_combinations(test_type_list, pk_col_set, 7)
for col_name in ["id_serial", "id_uuid", "id_int", "id_bigint", "id_str", "id_inet4"]:
test_for_pk_combinations(["view", "mat_view"], ["id_half_null_uuid", col_name], 7)
test_for_pk_combinations(["view", "mat_view"], ["id_all_null_uuid", col_name], 7)
class TestPyQgsPostgresProviderCompoundKey(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layers
cls.vl = QgsVectorLayer(
cls.dbconn +
' sslmode=disable key=\'"key1","key2"\' srid=4326 type=POINT table="qgis_test"."someDataCompound" (geom) sql=',
'test', 'postgres')
assert cls.vl.isValid()
cls.source = cls.vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def enableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', True)
return True
def disableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', False)
def uncompiledFilters(self):
return set(['"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')',
'"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')',
'"time" = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')'])
def partiallyCompiledFilters(self):
return set([])
def testConstraints(self):
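# neither column of the composite key is unique on its own, so the provider
# must not report a per-field Unique constraint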
for key in ["key1", "key2"]:
idx = self.vl.dataProvider().fieldNameIndex(key)
self.assertGreaterEqual(idx, 0)
self.assertFalse(self.vl.dataProvider().fieldConstraints(
idx) & QgsFieldConstraints.ConstraintUnique)
def testCompoundPkChanges(self):
""" Check if fields with compound primary keys can be changed """
vl = self.vl
self.assertTrue(vl.isValid())
idx_key1 = vl.fields().lookupField('key1')
idx_key2 = vl.fields().lookupField('key2')
# the name "pk" for this datasource is misleading;
# the primary key is actually composed of the fields key1 and key2
idx_pk = vl.fields().lookupField('pk')
idx_name = vl.fields().lookupField('name')
idx_name2 = vl.fields().lookupField('name2')
geomwkt = 'Point(-47.945 -15.812)'
# start editing ordinary attribute.
ft1 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("key1 = 2 AND key2 = 2")))
self.assertTrue(ft1.isValid())
original_geometry = ft1.geometry().asWkt()
vl.startEditing()
self.assertTrue(vl.changeAttributeValues(ft1.id(), {idx_name: 'Rose'}))
self.assertTrue(vl.commitChanges())
# check change
ft2 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression("key1 = 2 AND key2 = 2")))
self.assertEqual(ft2['name'], 'Rose')
self.assertEqual(ft2['name2'], 'Apple')
self.assertEqual(ft2['pk'], 2)
# now, start editing one of the PK field components
vl.startEditing()
self.assertTrue(vl.dataProvider().changeFeatures({ft2.id(): {idx_key2: 42, idx_name: 'Orchid', idx_name2: 'Daisy'}}, {ft2.id(): QgsGeometry.fromWkt(geomwkt)}))
self.assertTrue(vl.commitChanges())
# let's check if we still have the same fid...
ft2 = next(vl.getFeatures(QgsFeatureRequest().setFilterFid(ft2.id())))
self.assertEqual(ft2['key2'], 42)
self.assertEqual(ft2['name'], 'Orchid')
self.assertEqual(ft2['name2'], 'Daisy')
self.assertTrue(vl.startEditing())
vl.changeAttributeValues(ft2.id(), {idx_key1: 21, idx_name2: 'Hibiscus'})
self.assertTrue(vl.commitChanges())
ft2 = next(vl.getFeatures(QgsFeatureRequest().setFilterFid(ft2.id())))
self.assertEqual(ft2['key1'], 21)
self.assertEqual(ft2['name2'], 'Hibiscus')
# let's fetch a brand new feature and check how it went...
ft3 = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression('pk = 2')))
self.assertEqual(ft3['name'], 'Orchid')
self.assertEqual(ft3['key1'], 21)
self.assertEqual(ft3['key2'], 42)
assert compareWkt(ft3.geometry().asWkt(), geomwkt), "Geometry mismatch. Expected: {} Got: {}\n".format(ft3.geometry().asWkt(), geomwkt)
# Now, we leave the record as we found it, so further tests can proceed
vl.startEditing()
self.assertTrue(vl.dataProvider().changeFeatures({ft3.id(): {idx_key1: 2, idx_key2: 2, idx_pk: 2, idx_name: 'Apple', idx_name2: 'Apple'}}, {ft3.id(): QgsGeometry.fromWkt(original_geometry)}))
self.assertTrue(vl.commitChanges())
class TestPyQgsPostgresProviderBigintSinglePk(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layers
cls.vl = QgsVectorLayer(
cls.dbconn +
' sslmode=disable key=\'"pk"\' srid=4326 type=POINT table="qgis_test"."provider_bigint_single_pk" (geom) sql=',
'bigint_pk', 'postgres')
assert cls.vl.isValid()
cls.source = cls.vl.dataProvider()
cls.con = psycopg2.connect(cls.dbconn)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def getSource(self):
""" drops/recreates the test data anew, like TestPyQgsPostgresProvider::getSource above. """
self.execSqlCommand(
"DROP TABLE IF EXISTS qgis_test.provider_edit_bigint_single_pk")
self.execSqlCommand(
"CREATE TABLE qgis_test.provider_edit_bigint_single_pk ( pk bigserial PRIMARY KEY, cnt integer, name text DEFAULT 'qgis', name2 text DEFAULT 'qgis', num_char text, dt timestamp without time zone, \"date\" date, \"time\" time without time zone, geom public.geometry(Point,4326), key1 integer, key2 integer)")
self.execSqlCommand(
"INSERT INTO qgis_test.provider_edit_bigint_single_pk ( key1, key2, pk, cnt, name, name2, num_char, dt, \"date\", \"time\", geom) VALUES"
"(1, 1, 5, -200, NULL, 'NuLl', '5', TIMESTAMP '2020-05-04 12:13:14', '2020-05-02', '12:13:01', '0101000020E61000001D5A643BDFC751C01F85EB51B88E5340'),"
"(1, 2, 3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL, NULL),"
"(2, 1, 1, 100, 'Orange', 'oranGe', '1', TIMESTAMP '2020-05-03 12:13:14', '2020-05-03', '12:13:14', '0101000020E61000006891ED7C3F9551C085EB51B81E955040'),"
"(2, 2, 2, 200, 'Apple', 'Apple', '2', TIMESTAMP '2020-05-04 12:14:14', '2020-05-04', '12:14:14', '0101000020E6100000CDCCCCCCCC0C51C03333333333B35140'),"
"(2, 3, 4, 400, 'Honey', 'Honey', '4', TIMESTAMP '2021-05-04 13:13:14', '2021-05-04', '13:13:14', '0101000020E610000014AE47E17A5450C03333333333935340')")
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'"pk"\' srid=4326 type=POINT table="qgis_test"."provider_edit_bigint_single_pk" (geom) sql=',
'edit_bigint_pk', 'postgres')
return vl
def getEditableLayer(self):
return self.getSource()
def execSqlCommand(self, sql):
self.assertTrue(self.con)
cur = self.con.cursor()
self.assertTrue(cur)
cur.execute(sql)
cur.close()
self.con.commit()
def enableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', True)
return True
def disableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', False)
def uncompiledFilters(self):
return set(['"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')',
'"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')',
'"time" = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')'])
def partiallyCompiledFilters(self):
return set([])
def testConstraints(self):
idx = self.vl.dataProvider().fieldNameIndex("pk")
self.assertTrue(idx >= 0)
def testGetFeaturesFidTests(self):
fids = [f.id() for f in self.source.getFeatures()]
assert len(fids) == 5, 'Expected 5 features, got {} instead'.format(
len(fids))
for id in fids:
features = [f for f in self.source.getFeatures(
QgsFeatureRequest().setFilterFid(id))]
self.assertEqual(len(features), 1)
feature = features[0]
self.assertTrue(feature.isValid())
result = [feature.id()]
expected = [id]
            assert result == expected, \
                'Expected {} and got {} when testing for feature ID filter'.format(expected, result)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterFid(id)
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() == id)
# TODO: bad features are not tested because the PostgreSQL provider
# doesn't mark explicitly set invalid features as such.
def testGetFeatures(self, source=None, extra_features=[], skip_features=[], changed_attributes={},
changed_geometries={}):
""" Test that expected results are returned when fetching all features """
# IMPORTANT - we do not use `for f in source.getFeatures()` as we are also
# testing that existing attributes & geometry in f are overwritten correctly
# (for f in ... uses a new QgsFeature for every iteration)
if not source:
source = self.source
it = source.getFeatures()
f = QgsFeature()
attributes = {}
geometries = {}
while it.nextFeature(f):
# expect feature to be valid
self.assertTrue(f.isValid())
# some source test datasets will include additional attributes which we ignore,
# so cherry pick desired attributes
attrs = [f['pk'], f['cnt'], f['name'], f['name2'], f['num_char']]
# DON'T force the num_char attribute to be text - some sources (e.g., delimited text) will
# automatically detect that this attribute contains numbers and set it as a numeric
# field
# TODO: PostgreSQL 12 won't accept conversion from integer to text.
# attrs[4] = str(attrs[4])
attributes[f['pk']] = attrs
geometries[f['pk']] = f.hasGeometry() and f.geometry().asWkt()
expected_attributes = {5: [5, -200, NULL, 'NuLl', '5'],
3: [3, 300, 'Pear', 'PEaR', '3'],
1: [1, 100, 'Orange', 'oranGe', '1'],
2: [2, 200, 'Apple', 'Apple', '2'],
4: [4, 400, 'Honey', 'Honey', '4']}
expected_geometries = {1: 'Point (-70.332 66.33)',
2: 'Point (-68.2 70.8)',
3: None,
4: 'Point(-65.32 78.3)',
5: 'Point(-71.123 78.23)'}
for f in extra_features:
expected_attributes[f[0]] = f.attributes()
if f.hasGeometry():
expected_geometries[f[0]] = f.geometry().asWkt()
else:
expected_geometries[f[0]] = None
for i in skip_features:
del expected_attributes[i]
del expected_geometries[i]
for i, a in changed_attributes.items():
for attr_idx, v in a.items():
expected_attributes[i][attr_idx] = v
for i, g, in changed_geometries.items():
if g:
expected_geometries[i] = g.asWkt()
else:
expected_geometries[i] = None
self.assertEqual(attributes, expected_attributes, 'Expected {}, got {}'.format(
expected_attributes, attributes))
self.assertEqual(len(expected_geometries), len(geometries))
for pk, geom in list(expected_geometries.items()):
if geom:
                assert compareWkt(geom, geometries[pk]), \
                    "Geometry {} mismatch Expected:\n{}\nGot:\n{}\n".format(pk, geom, geometries[pk])
else:
self.assertFalse(
geometries[pk], 'Expected null geometry for {}'.format(pk))
def testAddFeatureExtraAttributes(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
return
# test that adding features with too many attributes drops these attributes
        # we're being more tricky and also add a valid feature to stress test the provider
f1 = QgsFeature()
f1.setAttributes([6, -220, 'qgis', 'String', '15'])
f2 = QgsFeature()
f2.setAttributes([7, -230, 'qgis', 'String', '15', 15, 16, 17])
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(result,
'Provider returned False to addFeatures with extra attributes. Providers should accept these features but truncate the extra attributes.')
# make sure feature was added correctly
added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 7][0]
# TODO: The PostgreSQL provider doesn't truncate extra attributes!
self.assertNotEqual(added.attributes(), [7, -230, 'qgis', 'String', '15'],
'The PostgreSQL provider doesn\'t truncate extra attributes.')
def testAddFeatureMissingAttributes(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
return
# test that adding features with missing attributes pads out these
# attributes with NULL values to the correct length.
# changed from ProviderTestBase.testAddFeatureMissingAttributes: we use
# 'qgis' instead of NULL below.
# TODO: Only unmentioned attributes get filled with the DEFAULT table
# value; if the attribute is present, the saved value will be NULL if
# that is indicated, or the value mentioned by the user; there is no
# implicit conversion of PyQGIS::NULL to PostgreSQL DEFAULT.
f1 = QgsFeature()
f1.setAttributes([6, -220, 'qgis', 'String'])
f2 = QgsFeature()
f2.setAttributes([7, 330])
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(result,
'Provider returned False to addFeatures with missing attributes. Providers should accept these features but add NULL attributes to the end of the existing attributes to the required field length.')
f1.setId(added[0].id())
f2.setId(added[1].id())
# check result - feature attributes MUST be padded out to required number of fields
f1.setAttributes([6, -220, 'qgis', 'String', NULL])
f2.setAttributes([7, 330, 'qgis', 'qgis', NULL])
self.testGetFeatures(l.dataProvider(), [f1, f2])
def testAddFeature(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
f1 = QgsFeature()
# changed from ProviderTestBase.testAddFeature: we use 'qgis' instead
# of NULL below.
# TODO: Only unmentioned attributes get filled with the DEFAULT table
# value; if the attribute is present, the saved value will be NULL if
# that is indicated, or the value mentioned by the user; there is no
# implicit conversion of PyQGIS::NULL to PostgreSQL DEFAULT.
f1.setAttributes([6, -220, 'qgis', 'String', '15'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))
f2 = QgsFeature()
f2.setAttributes([7, 330, 'Coconut', 'CoCoNut', '13'])
if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
# expect success
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(
result, 'Provider reported AddFeatures capability, but returned False to addFeatures')
f1.setId(added[0].id())
f2.setId(added[1].id())
# check result
self.testGetFeatures(l.dataProvider(), [f1, f2])
# add empty list, should return true for consistency
self.assertTrue(l.dataProvider().addFeatures([]))
# ensure that returned features have been given the correct id
f = next(l.getFeatures(
QgsFeatureRequest().setFilterFid(added[0].id())))
self.assertTrue(f.isValid())
self.assertEqual(f['cnt'], -220)
f = next(l.getFeatures(
QgsFeatureRequest().setFilterFid(added[1].id())))
self.assertTrue(f.isValid())
self.assertEqual(f['cnt'], 330)
else:
# expect fail
self.assertFalse(l.dataProvider().addFeatures([f1, f2]),
'Provider reported no AddFeatures capability, but returned true to addFeatures')
def testModifyPk(self):
""" Check if we can modify a primary key value. Since this PK is bigint, we also exercise the mapping between fid and values """
vl = self.getEditableLayer()
self.assertTrue(vl.isValid())
geomwkt = 'Point(-47.945 -15.812)'
feature = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression('pk = 4')))
self.assertTrue(feature.isValid())
self.assertTrue(vl.startEditing())
idxpk = vl.fields().lookupField('pk')
self.assertTrue(vl.dataProvider().changeFeatures({feature.id(): {idxpk: 42}}, {feature.id(): QgsGeometry.fromWkt(geomwkt)}))
self.assertTrue(vl.commitChanges())
# read back
ft = next(vl.getFeatures(QgsFeatureRequest().setFilterExpression('pk = 42')))
self.assertTrue(ft.isValid())
self.assertEqual(ft['name'], 'Honey')
assert compareWkt(ft.geometry().asWkt(), geomwkt), "Geometry mismatch. Expected: {} Got: {}\n".format(ft.geometry().asWkt(), geomwkt)
def testDuplicatedFieldNamesInQueryLayers(self):
"""Test regresssion GH #36205"""
vl = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'__rid__\' table="(SELECT row_number() OVER () AS __rid__, * FROM (SELECT * from qgis_test.some_poly_data a, qgis_test.some_poly_data b where ST_Intersects(a.geom,b.geom)) as foo)" sql=', 'test_36205', 'postgres')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 3)
# This fails because the "geom" field and "pk" fields are ambiguous
# There is no easy fix: all duplicated fields should be explicitly aliased
# and the query internally rewritten
# feature = next(vl.getFeatures())
# self.assertTrue(vl.isValid())
def testUnrestrictedGeometryType(self):
"""Test geometry column with no explicit geometry type, regression GH #38565"""
md = QgsProviderRegistry.instance().providerMetadata("postgres")
conn = md.createConnection(self.dbconn, {})
# Cleanup if needed
try:
conn.dropVectorTable('qgis_test', 'test_unrestricted_geometry')
except QgsProviderConnectionException:
pass
conn.executeSql('''
CREATE TABLE "qgis_test"."test_unrestricted_geometry" (
gid serial primary key,
geom geometry(Geometry, 4326)
);''')
points = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' srid=4326 type=POINT table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_points', 'postgres')
lines = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' srid=4326 type=LINESTRING table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_lines', 'postgres')
polygons = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' srid=4326 type=POLYGON table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_polygons', 'postgres')
self.assertTrue(points.isValid())
self.assertTrue(lines.isValid())
self.assertTrue(polygons.isValid())
f = QgsFeature(points.fields())
f.setGeometry(QgsGeometry.fromWkt('point(9 45)'))
self.assertTrue(points.dataProvider().addFeatures([f]))
self.assertEqual(points.featureCount(), 1)
self.assertEqual(lines.featureCount(), 0)
self.assertEqual(polygons.featureCount(), 0)
# Fetch from iterator
self.assertTrue(compareWkt(next(points.getFeatures()).geometry().asWkt(), 'point(9 45)'))
with self.assertRaises(StopIteration):
next(lines.getFeatures())
with self.assertRaises(StopIteration):
next(polygons.getFeatures())
f.setGeometry(QgsGeometry.fromWkt('linestring(9 45, 10 46)'))
self.assertTrue(lines.dataProvider().addFeatures([f]))
self.assertEqual(points.featureCount(), 1)
self.assertEqual(lines.featureCount(), 1)
self.assertEqual(polygons.featureCount(), 0)
# Fetch from iterator
self.assertTrue(compareWkt(next(points.getFeatures()).geometry().asWkt(), 'point(9 45)'))
self.assertTrue(compareWkt(next(lines.getFeatures()).geometry().asWkt(), 'linestring(9 45, 10 46)'))
with self.assertRaises(StopIteration):
next(polygons.getFeatures())
# Test regression GH #38567 (no SRID requested in the data source URI)
# Cleanup if needed
conn.executeSql('DELETE FROM "qgis_test"."test_unrestricted_geometry" WHERE \'t\'')
points = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' type=POINT table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_points', 'postgres')
lines = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' type=LINESTRING table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_lines', 'postgres')
polygons = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'gid\' type=POLYGON table="qgis_test"."test_unrestricted_geometry" (geom) sql=', 'test_polygons', 'postgres')
self.assertTrue(points.isValid())
self.assertTrue(lines.isValid())
self.assertTrue(polygons.isValid())
def testTrustFlag(self):
"""Test regression https://github.com/qgis/QGIS/issues/38809"""
vl = QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'pk\' srid=4326 type=POINT table="qgis_test"."editData" (geom) sql=',
'testTrustFlag', 'postgres')
self.assertTrue(vl.isValid())
p = QgsProject.instance()
d = QTemporaryDir()
dir_path = d.path()
self.assertTrue(p.addMapLayers([vl]))
project_path = os.path.join(dir_path, 'testTrustFlag.qgs')
self.assertTrue(p.write(project_path))
del vl
p.clear()
self.assertTrue(p.read(project_path))
vl = p.mapLayersByName('testTrustFlag')[0]
self.assertTrue(vl.isValid())
self.assertFalse(p.trustLayerMetadata())
# Set the trust flag
p.setTrustLayerMetadata(True)
self.assertTrue(p.write(project_path))
# Re-read
p.clear()
self.assertTrue(p.read(project_path))
self.assertTrue(p.trustLayerMetadata())
vl = p.mapLayersByName('testTrustFlag')[0]
self.assertTrue(vl.isValid())
def testQueryLayerDuplicatedFields(self):
"""Test that duplicated fields from a query layer are returned"""
def _get_layer(sql):
return QgsVectorLayer(
self.dbconn +
' sslmode=disable key=\'__rid__\' table=\'(SELECT row_number() OVER () AS __rid__, * FROM (' + sql + ') as foo)\' sql=',
'test', 'postgres')
l = _get_layer('SELECT 1, 2')
self.assertEqual(l.fields().count(), 3)
self.assertEqual([f.name() for f in l.fields()], ['__rid__', '?column?', '?column? (2)'])
l = _get_layer('SELECT 1 as id, 2 as id')
self.assertEqual(l.fields().count(), 3)
self.assertEqual([f.name() for f in l.fields()], ['__rid__', 'id', 'id (2)'])
def testInsertOnlyFieldIsEditable(self):
"""Test issue #40922 when an INSERT only use cannot insert a new feature"""
md = QgsProviderRegistry.instance().providerMetadata("postgres")
conn = md.createConnection(self.dbconn, {})
conn.executeSql('DROP TABLE IF EXISTS public.insert_only_points')
conn.executeSql('DROP USER IF EXISTS insert_only_user')
conn.executeSql('CREATE USER insert_only_user WITH PASSWORD \'insert_only_user\'')
conn.executeSql('CREATE TABLE insert_only_points (id SERIAL PRIMARY KEY, name VARCHAR(64))')
conn.executeSql("SELECT AddGeometryColumn('public', 'insert_only_points', 'geom', 4326, 'POINT', 2 )")
conn.executeSql('GRANT SELECT ON "public"."insert_only_points" TO insert_only_user')
uri = QgsDataSourceUri(self.dbconn +
                               ' sslmode=disable key=\'id\' srid=4326 type=POINT table="public"."insert_only_points" (geom) sql=')
uri.setUsername('insert_only_user')
uri.setPassword('insert_only_user')
vl = QgsVectorLayer(uri.uri(), 'test', 'postgres')
self.assertTrue(vl.isValid())
self.assertFalse(vl.startEditing())
feature = QgsFeature(vl.fields())
self.assertFalse(QgsVectorLayerUtils.fieldIsEditable(vl, 0, feature))
self.assertFalse(QgsVectorLayerUtils.fieldIsEditable(vl, 1, feature))
conn.executeSql('GRANT INSERT ON "public"."insert_only_points" TO insert_only_user')
vl = QgsVectorLayer(uri.uri(), 'test', 'postgres')
feature = QgsFeature(vl.fields())
self.assertTrue(vl.startEditing())
self.assertTrue(QgsVectorLayerUtils.fieldIsEditable(vl, 0, feature))
self.assertTrue(QgsVectorLayerUtils.fieldIsEditable(vl, 1, feature))
def testPkeyIntArray(self):
"""
Test issue #42778 when pkey is an int array
"""
md = QgsProviderRegistry.instance().providerMetadata("postgres")
conn = md.createConnection(self.dbconn, {})
conn.executeSql('DROP TABLE IF EXISTS public.test_pkey_intarray')
conn.executeSql('CREATE TABLE public.test_pkey_intarray (id _int8 PRIMARY KEY, name VARCHAR(64))')
conn.executeSql("""INSERT INTO public.test_pkey_intarray (id, name) VALUES('{0,0,19111815}', 'test')""")
uri = QgsDataSourceUri(self.dbconn +
' sslmode=disable key=\'id\' table="public"."test_pkey_intarray" sql=')
vl = QgsVectorLayer(uri.uri(), 'test', 'postgres')
self.assertTrue(vl.isValid())
feat = next(vl.getFeatures())
self.assertTrue(feat.isValid())
self.assertEqual(feat["name"], "test")
fid = feat.id()
self.assertTrue(fid > 0)
feat = vl.getFeature(fid)
self.assertTrue(feat.isValid())
self.assertEqual(feat["name"], "test")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
nutztherookie/wagtail | wagtail/contrib/wagtailfrontendcache/tests.py | 4 | 6379 | from __future__ import absolute_import, unicode_literals
import mock
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.contrib.wagtailfrontendcache.backends import (
BaseBackend, CloudflareBackend, CloudfrontBackend, HTTPBackend)
from wagtail.contrib.wagtailfrontendcache.utils import get_backends
from wagtail.tests.testapp.models import EventIndex
from wagtail.wagtailcore.models import Page
class TestBackendConfiguration(TestCase):
def test_default(self):
backends = get_backends()
self.assertEqual(len(backends), 0)
def test_varnish(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
})
self.assertEqual(set(backends.keys()), set(['varnish']))
self.assertIsInstance(backends['varnish'], HTTPBackend)
self.assertEqual(backends['varnish'].cache_scheme, 'http')
self.assertEqual(backends['varnish'].cache_netloc, 'localhost:8000')
def test_cloudflare(self):
backends = get_backends(backend_settings={
'cloudflare': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudflareBackend',
'EMAIL': 'test@test.com',
'TOKEN': 'this is the token',
'ZONEID': 'this is a zone id',
},
})
self.assertEqual(set(backends.keys()), set(['cloudflare']))
self.assertIsInstance(backends['cloudflare'], CloudflareBackend)
self.assertEqual(backends['cloudflare'].cloudflare_email, 'test@test.com')
self.assertEqual(backends['cloudflare'].cloudflare_token, 'this is the token')
def test_cloudfront(self):
backends = get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudfrontBackend',
'DISTRIBUTION_ID': 'frontend',
},
})
self.assertEqual(set(backends.keys()), set(['cloudfront']))
self.assertIsInstance(backends['cloudfront'], CloudfrontBackend)
self.assertEqual(backends['cloudfront'].cloudfront_distribution_id, 'frontend')
def test_cloudfront_validate_distribution_id(self):
with self.assertRaises(ImproperlyConfigured):
get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudfrontBackend',
},
})
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.CloudfrontBackend._create_invalidation')
def test_cloudfront_distribution_id_mapping(self, _create_invalidation):
backends = get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudfrontBackend',
'DISTRIBUTION_ID': {
'www.wagtail.io': 'frontend',
}
},
})
backends.get('cloudfront').purge('http://www.wagtail.io/home/events/christmas/')
backends.get('cloudfront').purge('http://torchbox.com/blog/')
_create_invalidation.assert_called_once_with('frontend', '/home/events/christmas/')
def test_multiple(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000/',
},
'cloudflare': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudflareBackend',
'EMAIL': 'test@test.com',
'TOKEN': 'this is the token',
'ZONEID': 'this is a zone id',
}
})
self.assertEqual(set(backends.keys()), set(['varnish', 'cloudflare']))
def test_filter(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000/',
},
'cloudflare': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.CloudflareBackend',
'EMAIL': 'test@test.com',
'TOKEN': 'this is the token',
'ZONEID': 'this is a zone id',
}
}, backends=['cloudflare'])
self.assertEqual(set(backends.keys()), set(['cloudflare']))
@override_settings(WAGTAILFRONTENDCACHE_LOCATION='http://localhost:8000')
def test_backwards_compatibility(self):
backends = get_backends()
self.assertEqual(set(backends.keys()), set(['default']))
self.assertIsInstance(backends['default'], HTTPBackend)
self.assertEqual(backends['default'].cache_scheme, 'http')
self.assertEqual(backends['default'].cache_netloc, 'localhost:8000')
PURGED_URLS = []
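# Module-level store so that MockBackend instances created by the cache
# framework can record purged URLs for the assertions in TestCachePurging.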
class MockBackend(BaseBackend):
def __init__(self, config):
pass
def purge(self, url):
PURGED_URLS.append(url)
@override_settings(WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.tests.MockBackend',
},
})
class TestCachePurging(TestCase):
fixtures = ['test.json']
def test_purge_on_publish(self):
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
page = EventIndex.objects.get(url_path='/home/events/')
page.save_revision().publish()
self.assertEqual(PURGED_URLS, ['http://localhost/events/'])
def test_purge_on_unpublish(self):
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
page = EventIndex.objects.get(url_path='/home/events/')
page.unpublish()
self.assertEqual(PURGED_URLS, ['http://localhost/events/'])
def test_purge_with_unroutable_page(self):
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
root = Page.objects.get(url_path='/')
page = EventIndex(title='new top-level page')
root.add_child(instance=page)
page.save_revision().publish()
self.assertEqual(PURGED_URLS, [])
| bsd-3-clause |
tshi04/machine-learning-codes | TextGAN_ST/utils.py | 2 | 5932 | import numpy as np
import torch
import time
import torch.nn.functional as F
from torch.autograd import Variable
'''
fast beam search
'''
def tensor_transformer(seq0, batch_size, beam_size):
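    # Repeat each of the beam_size hypotheses beam_size times along a new
    # axis, yielding beam_size * beam_size candidate rows per batch element.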
seq = seq0.unsqueeze(2)
seq = seq.repeat(1, 1, beam_size, 1)
seq = seq.contiguous().view(batch_size, beam_size*beam_size, seq.size(3))
return seq
def fast_beam_search(
model,
src_text,
vocab2id,
beam_size=4,
max_len=20,
network='gru',
pointer_net=True
):
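    # Overview (inferred from the code below): encode the source once per
    # beam, then at each step expand every hypothesis with its beam_size best
    # next words, score all beam_size * beam_size candidates, and keep the
    # top beam_size sequences (and their recurrent states) per batch element.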
batch_size = src_text.size(0)
src_text_rep = src_text.unsqueeze(1).clone().repeat(1, beam_size, 1).view(-1, src_text.size(1)).cuda()
if network == 'lstm':
encoder_hy, (h0_new, c0_new), h_attn_new, hidden_attn_new, past_attn_new = model.forward_encoder(src_text_rep)
else:
encoder_hy, hidden_decoder_new, h_attn_new, hidden_attn_new, past_attn_new = model.forward_encoder(src_text_rep)
beam_seq = Variable(torch.LongTensor(batch_size, beam_size, max_len+1).fill_(vocab2id['<pad>'])).cuda()
beam_seq[:, :, 0] = vocab2id['<s>']
beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
last_wd = Variable(torch.LongTensor(batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
for j in range(max_len):
if network == 'lstm':
logits, (h0, c0), h_attn, hidden_attn, past_attn, p_gen, attn_ = model.forward_onestep_decoder(
last_wd.view(-1, 1),
(h0_new, c0_new),
h_attn_new,
encoder_hy,
hidden_attn_new,
past_attn_new
)
else:
logits, hidden_decoder, h_attn, hidden_attn, past_attn, p_gen, attn_ = model.forward_onestep_decoder(
last_wd.view(-1, 1),
hidden_decoder_new,
h_attn_new,
encoder_hy,
hidden_attn_new,
past_attn_new
)
if pointer_net:
logits = model.cal_dist(src_text_rep, logits, attn_, p_gen)
else:
logits = F.softmax(logits, dim=2)
prob, wds = logits.data.topk(k=beam_size)
prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
if j == 0:
beam_prb = prob[:, 0, 0]
beam_seq[:, :, 1] = wds[:, 0, 0]
last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
if network == 'lstm':
h0_new = h0
c0_new = c0
else:
hidden_decoder_new = hidden_decoder
h_attn_new = h_attn
hidden_attn_new = hidden_attn
past_attn_new = past_attn
continue
cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
cand_last_wd = wds.squeeze(2).view(batch_size, -1)
cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
cand_prob += prob[:, :, 0]
cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
if network == 'lstm':
h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
else:
hidden_decoder_new = hidden_decoder_new.view(batch_size, beam_size, hidden_decoder_new.size(-1))
h_attn_new = h_attn_new.view(batch_size, beam_size, h_attn_new.size(1))
hidden_attn_new = hidden_attn_new.view(batch_size, beam_size, hidden_attn_new.size(1))
past_attn_new = past_attn_new.view(batch_size, beam_size, past_attn_new.size(1))
if network == 'lstm':
h0 = h0.view(batch_size, beam_size, h0.size(-1))
h0 = tensor_transformer(h0, batch_size, beam_size)
c0 = c0.view(batch_size, beam_size, c0.size(-1))
c0 = tensor_transformer(c0, batch_size, beam_size)
else:
hidden_decoder = hidden_decoder.view(batch_size, beam_size, hidden_decoder.size(-1))
hidden_decoder = tensor_transformer(hidden_decoder, batch_size, beam_size)
h_attn = h_attn.view(batch_size, beam_size, h_attn.size(1))
h_attn = tensor_transformer(h_attn, batch_size, beam_size)
hidden_attn = hidden_attn.view(batch_size, beam_size, hidden_attn.size(1))
hidden_attn = tensor_transformer(hidden_attn, batch_size, beam_size)
past_attn = past_attn.view(batch_size, beam_size, past_attn.size(1))
past_attn = tensor_transformer(past_attn, batch_size, beam_size)
tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
for x in range(batch_size):
for b in range(beam_size):
last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
beam_prb[x, b] = tmp_prb[x, b]
if network == 'lstm':
h0_new[x, b] = h0[x, tmp_idx[x, b]]
c0_new[x, b] = c0[x, tmp_idx[x, b]]
else:
hidden_decoder_new[x, b] = hidden_decoder[x, tmp_idx[x, b]]
h_attn_new[x, b] = h_attn[x, tmp_idx[x, b]]
hidden_attn_new[x, b] = hidden_attn[x, tmp_idx[x, b]]
past_attn_new[x, b] = past_attn[x, tmp_idx[x, b]]
if network == 'lstm':
h0_new = h0_new.view(-1, h0_new.size(-1))
c0_new = c0_new.view(-1, c0_new.size(-1))
else:
hidden_decoder_new = hidden_decoder_new.view(-1, hidden_decoder_new.size(-1))
h_attn_new = h_attn_new.view(-1, h_attn_new.size(-1))
hidden_attn_new = hidden_attn_new.view(-1, hidden_attn_new.size(-1))
past_attn_new = past_attn_new.view(-1, past_attn_new.size(-1))
return beam_seq, beam_prb
| gpl-3.0 |
tshi04/machine-learning-codes | seq2seq_lstm_luong_ST/data_utils.py | 1 | 3692 | import os
import re
import glob
import shutil
import numpy as np
import torch
from torch.autograd import Variable
'''
Construct the vocabulary
'''
def construct_vocab(file_, max_size=200000, mincount=5):
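    # Each line of file_ is expected to hold "<word> <count>"; words seen
    # fewer than mincount times are skipped, and the four special tokens
    # defined below are reserved.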
vocab2id = {'<s>': 2, '</s>': 3, '<pad>': 1, '<unk>': 0}
id2vocab = {2: '<s>', 3: '</s>', 1: '<pad>', 0: '<unk>'}
word_pad = {'<s>': 2, '</s>': 3, '<pad>': 1, '<unk>': 0}
cnt = 4
with open(file_, 'r') as fp:
for line in fp:
arr = re.split('\s', line[:-1])
if arr[0] == ' ':
continue
if arr[0] in word_pad:
continue
if int(arr[1]) >= mincount:
vocab2id[arr[0]] = cnt
id2vocab[cnt] = arr[0]
cnt += 1
if len(vocab2id) == max_size:
break
return vocab2id, id2vocab
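# Example usage (hypothetical file name; assumes a "<word> <count>" per-line
# vocabulary file as parsed above):
# vocab2id, id2vocab = construct_vocab('vocab_count.txt', max_size=50000, mincount=5)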
'''
Split the corpus into batches.
'''
def create_batch_file(path_, fkey_, file_, batch_size, clean=False):
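    # Writes one plain-text file per batch under path_/batch_<fkey_>_<batch_size>/
    # and returns the number of batch files; existing batch files are reused
    # unless clean=True.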
file_name = os.path.join(path_, file_)
folder = os.path.join(path_, 'batch_'+fkey_+'_'+str(batch_size))
if os.path.exists(folder):
batch_files = glob.glob(os.path.join(folder, '*'))
if len(batch_files) > 0 and clean==False:
return len(batch_files)
try:
shutil.rmtree(folder)
os.mkdir(folder)
except:
os.mkdir(folder)
fp = open(file_name, 'r')
cnt = 0
for line in fp:
try:
arr.append(line)
except:
arr = [line]
if len(arr) == batch_size:
fout = open(os.path.join(folder, str(cnt)), 'w')
for itm in arr:
fout.write(itm)
fout.close()
arr = []
cnt += 1
fout = open(os.path.join(folder, str(cnt)), 'w')
for itm in arr:
fout.write(itm)
fout.close()
arr = []
cnt += 1
fp.close()
return cnt
'''
Process the minibatch.
'''
def process_minibatch(batch_id, path_, fkey_, batch_size, vocab2id, max_lens=[400, 100]):
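    # Each line of a batch file holds "<target><sec><source>"; both sides are
    # whitespace-tokenized, mapped to ids, truncated to max_lens and padded.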
file_ = os.path.join(path_, 'batch_'+fkey_+'_'+str(batch_size), str(batch_id))
fp = open(file_, 'r')
src_arr = []
trg_arr = []
src_lens = []
trg_lens = []
for line in fp:
arr = re.split('<sec>', line[:-1])
dabs = re.split('\s', arr[0])
dabs = filter(None, dabs)
trg_lens.append(len(dabs))
dabs2id = [
vocab2id[wd] if wd in vocab2id
else vocab2id['<unk>']
for wd in dabs
]
trg_arr.append(dabs2id)
dart = re.split('\s', arr[1])
dart = filter(None, dart)
src_lens.append(len(dart))
dart2id = [
vocab2id[wd] if wd in vocab2id
else vocab2id['<unk>']
for wd in dart
]
src_arr.append(dart2id)
fp.close()
    # truncate and pad to fixed lengths; this could be made more efficient.
src_max_lens = max_lens[0]
trg_max_lens = max_lens[1]
src_arr = [itm[:src_max_lens] for itm in src_arr]
trg_arr = [itm[:trg_max_lens] for itm in trg_arr]
src_arr = [
itm + [vocab2id['<pad>']]*(src_max_lens-len(itm))
for itm in src_arr
]
trg_input_arr = [
itm[:-1] + [vocab2id['<pad>']]*(1+trg_max_lens-len(itm))
for itm in trg_arr
]
trg_output_arr = [
itm[1:] + [vocab2id['<pad>']]*(1+trg_max_lens-len(itm))
for itm in trg_arr
]
src_var = Variable(torch.LongTensor(src_arr))
trg_input_var = Variable(torch.LongTensor(trg_input_arr))
trg_output_var = Variable(torch.LongTensor(trg_output_arr))
return src_var, trg_input_var, trg_output_var
| gpl-3.0 |
zorroblue/scikit-learn | examples/covariance/plot_outlier_detection.py | 15 | 5121 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed, and performs better than the One-Class SVM
  in that case;
- using the One-Class SVM and its ability to capture the shape of the
  data set, hence performing better when the data is strongly
  non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
  hence better suited to high-dimensional settings, even though it also
  performs quite well in the low-dimensional examples below;
- using the Local Outlier Factor to measure the local deviation of a given
  data point with respect to its neighbors, by comparing their local density.
The ground truth about inliers and outliers is given by the points' colors,
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
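        # with the contamination known, flag the lowest `outliers_fraction`
        # of the decision scores as outliers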
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white',
s=20, edgecolor='k')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black',
s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
MLDGR/Kaggle_HomesiteQuoteConversion | Python/XGB_V1.py | 1 | 2521 | # adapted from:
# https://www.kaggle.com/mpearmain/homesite-quote-conversion/xgboost-benchmark/code
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_auc_score
seed = 12345
train = 'train.csv'
test = 'test.csv'
print 'Reading data...'
train = pd.read_csv(train)
test = pd.read_csv(test)
def format_data(d):
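    # derive Year/Month/DayOfWeek from the quote date, drop identifier and
    # date columns, and fill missing values with a -1 sentinel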
d.drop('QuoteNumber', axis=1, inplace=True)
# create date features
d['Date'] = pd.to_datetime(pd.Series(d['Original_Quote_Date']))
d['Year'] = d['Date'].dt.year
d['Month'] = d['Date'].dt.month
d['DayOfWeek'] = d['Date'].dt.dayofweek
d.drop('Original_Quote_Date', axis=1, inplace=True)
d.drop('Date', axis=1, inplace=True)
# fill NaN
d = d.fillna(-1)
return d
print 'Formatting data...'
y = np.array(train['QuoteConversion_Flag'])
train.drop('QuoteConversion_Flag', axis=1, inplace=True)
train = format_data(train)
submission = test[['QuoteNumber']]
test = format_data(test)
print 'Creating features...'
features = train.columns
# convert categorical features to numeric
for f in features:
if train[f].dtype=='object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
# create train/eval
train_X, eval_X, train_y, eval_y = train_test_split(train, y, test_size=.05)
dtrain = xgb.DMatrix(train_X, train_y)
deval = xgb.DMatrix(eval_X, eval_y)
watchlist = [(dtrain, 'train'), (deval, 'eval')]
params = {"objective": "binary:logistic",
"booster" : "gbtree",
"eta": 0.08,
"max_depth": 13,
"subsample": 0.7,
"colsample_bytree": 0.7,
"eval_metric": "auc",
"silent": 1
}
rounds = 1600
print 'Training model...'
gbm = xgb.train(params, dtrain, rounds, evals=watchlist, early_stopping_rounds=50, verbose_eval=True)
preds = gbm.predict(deval)
score = roc_auc_score(eval_y, preds)
print 'Evaluation set AUC: {0}'.format(score)
print 'Making submission...'
dtest = xgb.DMatrix(test)
submission_preds = gbm.predict(dtest)
submission['QuoteConversion_Flag'] = submission_preds
submission.to_csv('xgb_submission0005.csv', index=False)
# XGB feature importances
#xgb.plot_importance(gbm)
#mpl.pyplot.savefig('foo.png')
x = pd.Series(gbm.get_fscore())
x.to_csv('feature_score5.csv')
| gpl-3.0 |
shikhardb/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 290 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
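    # Fit the given classifier on the vectorized training set, then report
    # timing, a classification report and the confusion matrix on the test set.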
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
10clouds/edx-platform | lms/djangoapps/course_blocks/__init__.py | 12 | 1370 | """
The Course Blocks app, built upon the Block Cache framework in
openedx.core.lib.block_structure, is a higher-layer Django app in the LMS
that provides additional context about Courses and Users (via
usage_info.py), with implementations of Block Structure Transformers
related to block-structure course access.
As described in the Block Cache framework's __init__ module, this
framework provides faster access to course blocks for
performance-sensitive features, by caching all transformer-required data
so that no modulestore access is necessary during block access.
It is expected that only block-access-related transformers reside in
this Django app, as they are cross-cutting authorization transformers
required across other features. Other higher-level and feature-specific
transformers should be implemented in their own separate apps.
Note: Currently, some of the implementation is redundant with the
has_access code in courseware/access.py. However, we do have short-term
plans for refactoring the current has_access code to use Course Blocks
instead (https://openedx.atlassian.net/browse/MA-1019). We have
introduced this redundancy in the short-term as an incremental
implementation approach, reducing risk with initial release of this app.
"""
# Importing signals is necessary to activate the course publish/delete signal handlers.
from . import signals
| agpl-3.0 |
MadsJensen/RP_scripts | perm_test_score_gat_cp.py | 1 | 2341 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 5 10:27:24 2017
@author: mje
"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (permutation_test_score, StratifiedKFold)
from sklearn.externals import joblib
from my_settings import (data_path, source_folder, step_size, window_size)
# make time points
times = np.arange(-4000, 1001, 1)
times = times / 1000.
selected_times = times[::step_size]
n_time = sum((selected_times + window_size) < times[-1])
# Load data
subjects = [
"0008", "0009", "0010", "0012", "0014", "0015", "0016", "0017", "0018",
"0019", "0020", "0021", "0022"
]
cls_all = []
pln_all = []
for subject in subjects:
cls = np.load(source_folder + "graph_data/%s_cls_pow_sliding.npy" %
subject).item()
pln = np.load(source_folder + "graph_data/%s_pln_pow_sliding.npy" %
subject).item()
cls_tmp = []
cls_tmp.append(cls["cp_alpha"])
cls_tmp.append(cls["cp_beta"])
cls_tmp.append(cls["cp_gamma_low"])
cls_tmp.append(cls["cp_gamma_high"])
pln_tmp = []
pln_tmp.append(pln["cp_alpha"])
pln_tmp.append(pln["cp_beta"])
pln_tmp.append(pln["cp_gamma_low"])
pln_tmp.append(pln["cp_gamma_high"])
cls_all.append(np.asarray(cls_tmp))
pln_all.append(np.asarray(pln_tmp))
data_cls = np.asarray(cls_all)
data_pln = np.asarray(pln_all)
# Load GAT model
gat = joblib.load(data_path + "decode_time_gen/gat_cp.jl")
# Setup data for epochs and cross validation
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedKFold(n_splits=7, shuffle=True)
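# For each training time point, the fold-wise logistic regressions of the GAT
# model are averaged into a single classifier, which is then assessed with a
# label-permutation test.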
perm_score_results = []
for j, est in enumerate(gat.estimators_):
    # average the fold-wise classifiers into one model for this time point
    lr_mean = LogisticRegression(C=0.0001)
    lr_mean.coef_ = np.asarray([lr.coef_ for lr in est]).mean(
        axis=0).squeeze()
    lr_mean.intercept_ = np.asarray([lr.intercept_ for lr in est]).mean()
    score, perm_score, pval = permutation_test_score(
        lr_mean, X[:, :, j], y, cv=cv, scoring="roc_auc", n_permutations=2000)
    perm_score_results.append({
        "score": score,
        "perm_score": perm_score,
        "pval": pval
    })
joblib.dump(perm_score_results,
data_path + "decode_time_gen/perm_score_results_cp.npy")
| bsd-3-clause |
jccaicedo/localization-agent | utils/libLearning.py | 1 | 10431 | import numpy as np
import utils as cu
import libDetection as det
import dataProcessor as dp
from utils import emptyMatrix
###############################################
# Hard Negative Mining
###############################################
class HardNegativeMining():
def __init__(self,currentModel,maxVectorsPerImage):
self.currentModel = currentModel
self.maxVectorsPerImage = maxVectorsPerImage
def run(self,img,features,bboxes):
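        # Score every candidate window with the current model and keep the
        # top-scoring ones (at most 2 * maxVectorsPerImage) as hard negatives.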
pred = self.currentModel.predict(features,bboxes)
candidates = pred > -1.0001
f = features[candidates]
p = pred[candidates]
bboxes = [bboxes[i] for i in range(len(bboxes)) if candidates[i]]
# Sort candidate detections by score
s = np.argsort(p)
j = min(2*self.maxVectorsPerImage,f.shape[0])
# Keep top candidates only
if j > 0:
return (f[ s[-j:] ], p[ s[-j:] ], bboxes)
else:
return None
def getHardNegatives(negativesDir,negativesList,featuresExt,numFeatures,maxVectors,currentModel):
maxVectorsPerImage = maxVectors/len(negativesList)
i = 0
task = HardNegativeMining(currentModel,maxVectorsPerImage)
result = dp.processData(negativesList,negativesDir,featuresExt,task)
hardng = emptyMatrix([2*maxVectors,numFeatures])
boxes = []
while len(result) > 0:
data = result.pop(0)
if data[0].shape[0]+i > hardng.shape[0]:
print 'Not enough matrix space'
hardng = np.concatenate( (hardng,emptyMatrix([maxVectors,numFeatures])) )
hardng[i:i+data[0].shape[0],:] = data[0]
boxes += data[2]
i = i + data[0].shape[0]
return hardng[0:i,:],boxes[0:i]
###############################################
# Random Negative Windows Filter
###############################################
class RandomNegativesFilter():
def __init__(self,numFeatures,randomBoxes):
self.numFeatures = numFeatures
self.randomBoxes = randomBoxes
def run(self,img,features,bboxes):
boxes = range(0,features.shape[0])
cu.rnd.shuffle(boxes)
m = min(features.shape[0],self.randomBoxes)
        bboxes = [bboxes[i] for i in boxes]
        # keep only the boxes matching the m selected feature rows
        return (features[boxes[0:m]], bboxes[0:m])
def getRandomNegs(featuresDir,negativeList,featuresExt,numFeatures,maxVectors,maxNegativeImages):
randomBoxes = maxVectors/maxNegativeImages
cu.rnd.shuffle(negativeList)
task = RandomNegativesFilter(numFeatures,randomBoxes)
negatives = [negativeList.pop(0) for i in range(maxNegativeImages)]
result = dp.processData(negatives,featuresDir,featuresExt,task)
neg = emptyMatrix([maxVectors,numFeatures])
boxes = []
n = 0
while len(result) > 0:
mat,box = result.pop()
neg[n:n+mat.shape[0]] = mat
n = n + mat.shape[0]
boxes += box
return (neg[0:n],boxes[0:n])
###############################################
# Negative-Windows-From-Positive-Images Filter
###############################################
class NWFPIFilter():
def __init__(self,groundTruths,featuresDir,featuresExt,maxNegatives,overlap,model):
self.groundTruths = groundTruths
self.featuresDir = featuresDir
self.featuresExt = featuresExt
self.maxNegatives = maxNegatives
self.overlap = overlap
self.model = model
def rank(self,img,features,bboxes):
pred = self.model.predict(features,bboxes)
candidates = pred > -1.0001
f = features[candidates]
b = [bboxes[t] for t in range(len(bboxes)) if candidates[t]]
p = pred[candidates]
# Sort candidate detections by score
s = np.argsort(p)
j = min(2*self.maxNegatives,f.shape[0])
# Keep top candidates only
if j > 0:
return (f[ s[-j:] ], [ b[t] for t in s[-j:] ])
else:
return None,None
def run(self,img,features,bboxes):
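        # Label each box by overlap with the ground truth: IoU >= 0.85 is
        # positive, 0 < IoU <= self.overlap is negative; negatives are ranked
        # by model score (if a model is given) or shuffled before truncation.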
if self.model:
features,bboxes = self.rank(img,features,bboxes)
            if features is None:
return ([],[],[],[])
positives,negatives = [],[]
imageData = self.groundTruths[img]
for i in range( len(bboxes) ):
isPositive,isNegative = False,False
for j in imageData:
o = det.IoU(j,map(float,bboxes[i][1:]))
if o >= 0.85:
isPositive = True
break
elif self.overlap >= o and o > 0:
isNegative = True
if isPositive:
positives.append(i)
if isNegative:
negatives.append(i)
if self.model:
negatives.reverse()
else:
cu.rnd.shuffle(negatives)
posIdx = [bboxes[t] for t in positives]
posFeat = [features[positives]]
negIdx = [bboxes[t] for t in negatives[0:self.maxNegatives]]
negFeat = [features[negatives[0:self.maxNegatives]]]
return (posIdx,posFeat,negIdx,negFeat)
def selectNegativeWindowsFromPositiveImages(groundTruths,featuresDir,featuresExt,maxVectors,overlap,model=False):
gtb = dict()
for x in groundTruths:
im,bx = x[0],map(float,x[1:])
try:
gtb[im].append(bx)
except:
gtb[im] = [bx]
task = NWFPIFilter(gtb,featuresDir,featuresExt,maxVectors/len(gtb.keys()),overlap,model)
result = dp.processData(gtb.keys(),featuresDir,featuresExt,task)
posIdx,posFeat,negIdx,negFeat = [],[],[],[]
for r in result:
posIdx += r[0]
posFeat += r[1]
negIdx += r[2]
negFeat += r[3]
Xp = emptyMatrix( (len(posIdx),posFeat[0].shape[1]) )
Xn = emptyMatrix( (len(negIdx),negFeat[0].shape[1]) )
k = 0
for i in range(len(posFeat)):
Xp[k:k+posFeat[i].shape[0],:] = posFeat[i]
k = k + posFeat[i].shape[0]
k = 0
for i in range(len(negFeat)):
Xn[k:k+negFeat[i].shape[0],:] = negFeat[i]
        k = k + negFeat[i].shape[0]
print 'NegFromPos ready:',len(negIdx)
return {'posIdx':posIdx, 'posFeat':Xp, 'negIdx':negIdx, 'negFeat':Xn}
###############################################
# Cross-validation evaluation
###############################################
def reportCrossValidationPerformance(clf,X,Y):
from sklearn import cross_validation
import sklearn.metrics as met
skf = cross_validation.StratifiedKFold(Y, n_folds=10)
p,r = 0.0,0.0
for train_index, test_index in skf:
X_train, X_test = X[train_index], X[test_index]
Y_train, Y_test = Y[train_index], Y[test_index]
clf.fit(X_train,Y_train)
# Performance measures:
pred = clf.predict(X_test)
cfm = met.confusion_matrix(Y_test,pred)
precision = float(cfm[1][1])/(cfm[1][1] + cfm[0][1])
recall = float(cfm[1][1])/(cfm[1][1] + cfm[1][0])
p += precision
r += recall
print '{:.4f} {:.4f}'.format(precision,recall)
print cfm
print 'AVG {:.4f} {:.4f}'.format(p/10.0, r/10.0)
###############################################
# Load Hard Negatives from predefined list
###############################################
class LoadHardNegatives():
def __init__(self,boxInfo):
self.boxInfo = boxInfo
def run(self,img,features,bboxes):
wanted = set([ ':'.join(map(str,x)) for x in self.boxInfo[img] ])
candidates = []
imgList = []
box = []
for i in range(len(bboxes)):
b = bboxes[i]
boxHash = ':'.join(b[1:])
if boxHash in wanted:
candidates.append(True)
imgList.append(img)
box.append(b)
wanted.remove(boxHash)
else:
candidates.append(False)
candidates = np.asarray(candidates)
return (features[candidates],imgList,box)
def loadHardNegativesFromList(featuresDir,negativesInfo,featuresExt,numFeatures,totalNegatives,idx=False):
i = 0
task = LoadHardNegatives(negativesInfo)
result = dp.processData(negativesInfo.keys(),featuresDir,featuresExt,task)
hardng = emptyMatrix([totalNegatives,numFeatures])
hardNames = []
boxes = []
while len(result) > 0:
data,imgs,box = result.pop(0)
hardng[i:i+data.shape[0],:] = data
hardNames += imgs
boxes += box
i = i + data.shape[0]
return (hardng[0:i,:],boxes)
def parseRankedDetectionsFile(detectionsLog,maxNegOverlap,maxNegativeVectors):
## Read ranked list of negatives
if isinstance(detectionsLog, basestring):
log = [x.split() for x in open(detectionsLog)]
else:
log = detectionsLog
posExamples = dict()
negExamples = dict()
posCount,negCount,noCares,negTaken = 0,0,0,0
for l in log:
if l[7] == '1':
posCount += 1
try:
posExamples[l[0]] += [ l[1:5] ]
except:
posExamples[l[0]] = [ l[1:5] ]
elif l[7] == '0' and float(l[6]) <= maxNegOverlap:
negCount += 1
if negCount < maxNegativeVectors:
negTaken += 1
try:
negExamples[l[0]] += [ l[1:5] ]
except:
negExamples[l[0]] = [ l[1:5] ]
else:
noCares += 1
print 'NEGEXAMPLES:',np.sum( [len(negExamples[i]) for i in negExamples.keys()] )
print 'Log Of Detections: Pos {:} Neg {:} NoCares {:}'.format(posCount,negCount,noCares)
return {'posExamples':posExamples,'negExamples':negExamples,'negTaken':negTaken}
###############################################
# Compute aspect ratio and size features
###############################################
def addAspectRatioAndSizeFeatures(features,index,aspectRatios,objectSizes):
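    # Append soft-binned box size and aspect-ratio features: each box receives
    # a decaying activation (1, .5, .25, .125, .0625) over the bins closest to
    # its normalized size and aspect ratio.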
fs = 12
boxes = np.asmatrix( [ map(float,x[1:]) for x in index] )
imgSize = np.max(boxes,axis=0)[0,2:]
sizes = np.asarray(boxes[:,2]-boxes[:,0])*np.asarray(boxes[:,3]-boxes[:,1])/(500*500) #/(imgSize[0,0]*imgSize[0,1])
sizeF = np.tile(sizes, (1, fs)) - np.tile(objectSizes,(sizes.shape[0],1))
ratios = np.asarray(boxes[:,2]-boxes[:,0])/np.asarray(boxes[:,3]-boxes[:,1])
ratioF = np.tile(ratios, (1, fs)) - np.tile(aspectRatios,(sizes.shape[0],1))
# **
S = np.argsort(np.abs(sizeF),axis=1)
R = np.argsort(np.abs(ratioF),axis=1)
for i in range(len(S)):
sizeF[ i, S[i] ] = [1.,.5,.25,.125,.0625,0.,0.,0.,0.,0.,0.,0.]
ratioF[i, R[i] ] = [1.,.5,.25,.125,.0625,0.,0.,0.,0.,0.,0.,0.]
# **
return np.concatenate( (features, sizeF, ratioF), axis=1 )
def computeAspectRatioAndSizeIntervals(index):
boxes = np.asmatrix( [ map(float,x[1:]) for x in index] )
imgSize = np.max(boxes,axis=0)[0,2:]
sizes = np.asarray(boxes[:,2]-boxes[:,0])*np.asarray(boxes[:,3]-boxes[:,1])/(500*500) #/(imgSize[0,0]*imgSize[0,1])
objectSizes = np.percentile(sizes,range(5,100,10))
ratios = np.asarray(boxes[:,2]-boxes[:,0])/np.asarray(boxes[:,3]-boxes[:,1])
aspectRatios = np.percentile(ratios,range(5,100,10))
# **
objectSizes = [objectSizes[0]*0.5] + objectSizes + [1.0]
aspectRatios = [aspectRatios[0]*0.5] + aspectRatios + [aspectRatios[-1]*1.5]
# **
print 'AspectRatios:',aspectRatios
print 'ObjectSizes:',objectSizes
return aspectRatios,objectSizes
| mit |
zorroblue/scikit-learn | sklearn/decomposition/tests/test_pca.py | 2 | 27961 | import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(ddof=1, axis=0),
np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    # Check that the explained variance estimates agree across SVD solvers
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
expected_result = sorted(expected_result, reverse=True)[:2]
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(pca.explained_variance_, expected_result)
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(apca.explained_variance_, expected_result)
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, ddof=1, axis=0),
decimal=1)
assert_array_almost_equal(rpca.explained_variance_,
expected_result, decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 9)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
# Ensures that solver-specific extreme inputs for the n_components
# parameter raise errors
X = np.array([[0, 1, 0], [1, 0, 0]])
smallest_d = 2 # The smallest dimension
lower_limit = {'randomized': 1, 'arpack': 1, 'full': 0, 'auto': 0}
for solver in solver_list:
# We conduct the same test on X.T so that it is invariant to axis.
for data in [X, X.T]:
for n_components in [-1, 3]:
if solver == 'auto':
solver_reported = 'full'
else:
solver_reported = solver
assert_raises_regex(ValueError,
"n_components={}L? must be between "
"{}L? and min\(n_samples, n_features\)="
"{}L? with svd_solver=\'{}\'"
.format(n_components,
lower_limit[solver],
smallest_d,
solver_reported),
PCA(n_components,
svd_solver=solver).fit, data)
if solver == 'arpack':
n_components = smallest_d
assert_raises_regex(ValueError,
"n_components={}L? must be "
"strictly less than "
"min\(n_samples, n_features\)={}L?"
" with svd_solver=\'arpack\'"
.format(n_components, smallest_d),
PCA(n_components, svd_solver=solver)
.fit, data)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
X = iris.data
# We conduct the same test on X.T so that it is invariant to axis.
for data in [X, X.T]:
for solver in solver_list:
pca = PCA(svd_solver=solver)
pca.fit(data)
if solver == 'arpack':
assert_equal(pca.n_components_, min(data.shape) - 1)
else:
assert_equal(pca.n_components_, min(data.shape))
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that randomized PCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_n_components_mle():
# Ensure that n_components == 'mle' doesn't raise error for auto/full
# svd_solver and raises error for arpack/randomized svd_solver
rng = np.random.RandomState(0)
n_samples = 600
n_features = 10
X = rng.randn(n_samples, n_features)
n_components_dict = {}
for solver in solver_list:
pca = PCA(n_components='mle', svd_solver=solver)
if solver in ['auto', 'full']:
pca.fit(X)
n_components_dict[solver] = pca.n_components_
else: # arpack/randomized solver
error_message = ("n_components='mle' cannot be a string with "
"svd_solver='{}'".format(solver))
assert_raise_message(ValueError, error_message, pca.fit, X)
assert_equal(n_components_dict['auto'], n_components_dict['full'])
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_pca_score_with_different_solvers():
digits = datasets.load_digits()
X_digits = digits.data
pca_dict = {svd_solver: PCA(n_components=30, svd_solver=svd_solver,
random_state=0)
for svd_solver in solver_list}
for pca in pca_dict.values():
pca.fit(X_digits)
# Sanity check for the noise_variance_. For more details see
# https://github.com/scikit-learn/scikit-learn/issues/7568
# https://github.com/scikit-learn/scikit-learn/issues/8541
# https://github.com/scikit-learn/scikit-learn/issues/8544
assert np.all((pca.explained_variance_ - pca.noise_variance_) >= 0)
# Compare scores with different svd_solvers
score_dict = {svd_solver: pca.score(X_digits)
for svd_solver, pca in pca_dict.items()}
assert_almost_equal(score_dict['full'], score_dict['arpack'])
assert_almost_equal(score_dict['full'], score_dict['randomized'],
decimal=3)
def test_pca_zero_noise_variance_edge_cases():
# ensure that noise_variance_ is 0 in edge cases
# when n_components == min(n_samples, n_features)
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
# arpack raises ValueError for n_components == min(n_samples,
# n_features)
svd_solvers = ['full', 'randomized']
for svd_solver in svd_solvers:
pca = PCA(svd_solver=svd_solver, n_components=p)
pca.fit(X)
assert pca.noise_variance_ == 0
pca.fit(X.T)
assert pca.noise_variance_ == 0
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
def test_pca_bad_solver():
X = np.random.RandomState(0).rand(5, 4)
pca = PCA(n_components=3, svd_solver='bad_argument')
assert_raises(ValueError, pca.fit, X)
def test_pca_dtype_preservation():
for svd_solver in solver_list:
yield check_pca_float_dtype_preservation, svd_solver
yield check_pca_int_dtype_upcast_to_double, svd_solver
def check_pca_float_dtype_preservation(svd_solver):
# Ensure that PCA does not upscale the dtype when input is float32
X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64)
X_32 = X_64.astype(np.float32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float32
assert pca_64.transform(X_64).dtype == np.float64
assert pca_32.transform(X_32).dtype == np.float32
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
def check_pca_int_dtype_upcast_to_double(svd_solver):
# Ensure that all int types will be upcast to float64
X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
X_i64 = X_i64.astype(np.int64)
X_i32 = X_i64.astype(np.int32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float64
assert pca_64.transform(X_i64).dtype == np.float64
assert pca_32.transform(X_i32).dtype == np.float64
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
| bsd-3-clause |
jhbsz/OSI-OS | cddl/contrib/opensolaris/lib/pyzfs/common/dataset.py | 9 | 6879 | #! /usr/bin/python2.6
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
#
"""Implements the Dataset class, providing methods for manipulating ZFS
datasets. Also implements the Property class, which describes ZFS
properties."""
import zfs.ioctl
import zfs.util
import errno
_ = zfs.util._
class Property(object):
"""This class represents a ZFS property. It contains
information about the property -- if it's readonly, a number vs
string vs index, etc. Only native properties are represented by
this class -- not user properties (eg "user:prop") or userspace
properties (eg "userquota@joe")."""
__slots__ = "name", "number", "type", "default", "attr", "validtypes", \
"values", "colname", "rightalign", "visible", "indextable"
__repr__ = zfs.util.default_repr
def __init__(self, t):
"""t is the tuple of information about this property
from zfs.ioctl.get_proptable, which should match the
members of zprop_desc_t (see zfs_prop.h)."""
self.name = t[0]
self.number = t[1]
self.type = t[2]
if self.type == "string":
self.default = t[3]
else:
self.default = t[4]
self.attr = t[5]
self.validtypes = t[6]
self.values = t[7]
self.colname = t[8]
self.rightalign = t[9]
self.visible = t[10]
self.indextable = t[11]
def delegatable(self):
"""Return True if this property can be delegated with
"zfs allow"."""
return self.attr != "readonly"
proptable = dict()
for name, t in zfs.ioctl.get_proptable().iteritems():
proptable[name] = Property(t)
del name, t
def getpropobj(name):
"""Return the Property object that is identified by the given
name string. It can be the full name, or the column name."""
try:
return proptable[name]
except KeyError:
for p in proptable.itervalues():
if p.colname and p.colname.lower() == name:
return p
raise
class Dataset(object):
"""Represents a ZFS dataset (filesystem, snapshot, zvol, clone, etc).
Generally, this class provides interfaces to the C functions in
zfs.ioctl which actually interface with the kernel to manipulate
datasets.
Unless otherwise noted, any method can raise a ZFSError to
indicate failure."""
__slots__ = "name", "__props"
__repr__ = zfs.util.default_repr
def __init__(self, name, props=None,
types=("filesystem", "volume"), snaps=True):
"""Open the named dataset, checking that it exists and
is of the specified type.
name is the string name of this dataset.
props is the property settings dict from zfs.ioctl.next_dataset.
types is an iterable of strings specifying which types
of datasets are permitted. Accepted strings are
"filesystem" and "volume". Defaults to accepting all
types.
snaps is a boolean specifying if snapshots are acceptable.
Raises a ZFSError if the dataset can't be accessed (eg
doesn't exist) or is not of the specified type.
"""
self.name = name
e = zfs.util.ZFSError(errno.EINVAL,
_("cannot open %s") % name,
_("operation not applicable to datasets of this type"))
if "@" in name and not snaps:
raise e
if not props:
props = zfs.ioctl.dataset_props(name)
self.__props = props
if "volume" not in types and self.getprop("type") == 3:
raise e
if "filesystem" not in types and self.getprop("type") == 2:
raise e
def getprop(self, propname):
"""Return the value of the given property for this dataset.
Currently only works for native properties (those with a
Property object.)
Raises KeyError if propname does not specify a native property.
Does not raise ZFSError.
"""
p = getpropobj(propname)
try:
return self.__props[p.name]["value"]
except KeyError:
return p.default
def parent(self):
"""Return a Dataset representing the parent of this one."""
return Dataset(self.name[:self.name.rindex("/")])
def descendents(self):
"""A generator function which iterates over all
descendent Datasets (not including snapshots."""
cookie = 0
while True:
# next_dataset raises StopIteration when done
(name, cookie, props) = \
zfs.ioctl.next_dataset(self.name, False, cookie)
ds = Dataset(name, props)
yield ds
for child in ds.descendents():
yield child
def userspace(self, prop):
"""A generator function which iterates over a
userspace-type property.
prop specifies which property ("userused@",
"userquota@", "groupused@", or "groupquota@").
returns 3-tuple of domain (string), rid (int), and space (int).
"""
d = zfs.ioctl.userspace_many(self.name, prop)
for ((domain, rid), space) in d.iteritems():
yield (domain, rid, space)
def userspace_upgrade(self):
"""Initialize the accounting information for
userused@... and groupused@... properties."""
return zfs.ioctl.userspace_upgrade(self.name)
def set_fsacl(self, un, d):
"""Add to the "zfs allow"-ed permissions on this Dataset.
un is True if the specified permissions should be removed.
d is a dict specifying which permissions to add/remove:
{ "whostr" -> None # remove all perms for this entity
"whostr" -> { "perm" -> None} # add/remove these perms
} """
return zfs.ioctl.set_fsacl(self.name, un, d)
def get_fsacl(self):
"""Get the "zfs allow"-ed permissions on the Dataset.
Return a dict("whostr": { "perm" -> None })."""
return zfs.ioctl.get_fsacl(self.name)
def get_holds(self):
"""Get the user holds on this Dataset.
Return a dict("tag": timestamp)."""
return zfs.ioctl.get_holds(self.name)
def snapshots_fromcmdline(dsnames, recursive):
for dsname in dsnames:
if not "@" in dsname:
raise zfs.util.ZFSError(errno.EINVAL,
_("cannot open %s") % dsname,
_("operation only applies to snapshots"))
try:
ds = Dataset(dsname)
yield ds
except zfs.util.ZFSError, e:
if not recursive or e.errno != errno.ENOENT:
raise
if recursive:
(base, snapname) = dsname.split('@')
parent = Dataset(base)
for child in parent.descendents():
try:
yield Dataset(child.name + "@" +
snapname)
except zfs.util.ZFSError, e:
if e.errno != errno.ENOENT:
raise
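# Illustrative sketch (not part of the original module; the helper name is
# an assumption). Like everything else here it requires a live ZFS kernel
# interface via zfs.ioctl, so treat it as documentation of typical use.
def _example_list_descendents(name):
    """Print each descendent dataset of `name` and its "type" property."""
    ds = Dataset(name)
    for child in ds.descendents():
        print child.name, child.getprop("type")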
| bsd-3-clause |
shikhardb/scikit-learn | examples/svm/plot_oneclass.py | 248 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
YRSNorwich/ProjectBlock | indev.py | 1 | 9768 | '''
Created on Jul 22, 2011
@author: Rio
'''
"""
Indev levels:
TAG_Compound "MinecraftLevel"
{
TAG_Compound "Environment"
{
TAG_Short "SurroundingGroundHeight"// Height of surrounding ground (in blocks)
TAG_Byte "SurroundingGroundType" // Block ID of surrounding ground
TAG_Short "SurroundingWaterHeight" // Height of surrounding water (in blocks)
TAG_Byte "SurroundingWaterType" // Block ID of surrounding water
TAG_Short "CloudHeight" // Height of the cloud layer (in blocks)
TAG_Int "CloudColor" // Hexadecimal value for the color of the clouds
TAG_Int "SkyColor" // Hexadecimal value for the color of the sky
TAG_Int "FogColor" // Hexadecimal value for the color of the fog
TAG_Byte "SkyBrightness" // The brightness of the sky, from 0 to 100
}
TAG_List "Entities"
{
TAG_Compound
{
// One of these per entity on the map.
// These can change a lot, and are undocumented.
// Feel free to play around with them, though.
// The most interesting one might be the one with ID "LocalPlayer", which contains the player inventory
}
}
TAG_Compound "Map"
{
// To access a specific block from either byte array, use the following algorithm:
// Index = x + (y * Depth + z) * Width
TAG_Short "Width" // Width of the level (along X)
TAG_Short "Height" // Height of the level (along Y)
TAG_Short "Length" // Length of the level (along Z)
TAG_Byte_Array "Blocks" // An array of Length*Height*Width bytes specifying the block types
TAG_Byte_Array "Data" // An array of Length*Height*Width bytes with data for each blocks
TAG_List "Spawn" // Default spawn position
{
TAG_Short x // These values are multiplied by 32 before being saved
TAG_Short y // That means that the actual values are x/32.0, y/32.0, z/32.0
TAG_Short z
}
}
TAG_Compound "About"
{
TAG_String "Name" // Level name
TAG_String "Author" // Name of the player who made the level
TAG_Long "CreatedOn" // Timestamp when the level was first created
}
}
"""
from mclevelbase import *
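# Illustrative sketch (not part of the original module; the helper name is
# an assumption). The "Map" docstring above gives the addressing formula
# Index = x + (y * Depth + z) * Width for the flat "Blocks"/"Data" byte
# arrays; "Depth" is read here as the Length tag (the extent along Z).
def _indev_block_index(x, y, z, width, length):
    """Return the flat array index of block (x, y, z) in an Indev level
    of the given Width (along X) and Length (along Z)."""
    return x + (y * length + z) * width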
MinecraftLevel = "MinecraftLevel"
Environment = "Environment"
SurroundingGroundHeight = "SurroundingGroundHeight"
SurroundingGroundType = "SurroundingGroundType"
SurroundingWaterHeight = "SurroundingWaterHeight"
SurroundingWaterType = "SurroundingWaterType"
CloudHeight = "CloudHeight"
CloudColor = "CloudColor"
SkyColor = "SkyColor"
FogColor = "FogColor"
SkyBrightness = "SkyBrightness"
About = "About"
Name = "Name"
Author = "Author"
CreatedOn = "CreatedOn"
Spawn = "Spawn"
__all__ = ["MCIndevLevel"]
class MCIndevLevel(MCLevel):
""" IMPORTANT: self.Blocks and self.Data are indexed with [x,z,y] via axis
swapping to be consistent with infinite levels."""
hasEntities = True
def setPlayerSpawnPosition(self, pos, player=None):
assert len(pos) == 3
self.Spawn = array(pos);
def playerSpawnPosition(self, player=None):
return self.Spawn;
def setPlayerPosition(self, pos, player="Ignored"):
for x in self.root_tag["Entities"]:
if x["id"].value == "LocalPlayer":
x["Pos"] = TAG_List([TAG_Float(p) for p in pos])
def getPlayerPosition(self, player="Ignored"):
for x in self.root_tag["Entities"]:
if x["id"].value == "LocalPlayer":
return array(map(lambda x:x.value, x["Pos"]));
def setPlayerOrientation(self, yp, player="Ignored"):
for x in self.root_tag["Entities"]:
if x["id"].value == "LocalPlayer":
x["Rotation"] = TAG_List([TAG_Float(p) for p in yp])
def playerOrientation(self, player="Ignored"):
""" returns (yaw, pitch) """
for x in self.root_tag["Entities"]:
if x["id"].value == "LocalPlayer":
return array(map(lambda x:x.value, x["Rotation"]));
def setBlockDataAt(self, x, y, z, newdata):
if x < 0 or y < 0 or z < 0: return 0
if x >= self.Width or y >= self.Height or z >= self.Length: return 0;
self.Data[x, z, y] = (newdata & 0xf);
def blockDataAt(self, x, y, z):
if x < 0 or y < 0 or z < 0: return 0
if x >= self.Width or y >= self.Height or z >= self.Length: return 0;
return self.Data[x, z, y];
def blockLightAt(self, x, y, z):
if x < 0 or y < 0 or z < 0: return 0
if x >= self.Width or y >= self.Height or z >= self.Length: return 0;
return self.BlockLight[x, z, y];
def __repr__(self):
return u"MCIndevLevel({0}): {1}W {2}L {3}H".format(self.filename, self.Width, self.Length, self.Height)
@classmethod
def _isTagLevel(cls, root_tag):
return "MinecraftLevel" == root_tag.name
def __init__(self, root_tag=None, filename=""):
self.Width = 0
self.Height = 0
self.Length = 0
self.Blocks = array([], uint8)
self.Data = array([], uint8)
self.Spawn = (0, 0, 0)
self.filename = filename;
if root_tag:
self.root_tag = root_tag;
mapTag = root_tag[Map];
self.Width = mapTag[Width].value
self.Length = mapTag[Length].value
self.Height = mapTag[Height].value
mapTag[Blocks].value.shape = (self.Height, self.Length, self.Width)
self.Blocks = swapaxes(mapTag[Blocks].value, 0, 2)
mapTag[Data].value.shape = (self.Height, self.Length, self.Width)
self.Data = swapaxes(mapTag[Data].value, 0, 2)
self.BlockLight = self.Data & 0xf
self.Data >>= 4
self.Spawn = [mapTag[Spawn][i].value for i in range(3)];
if not Entities in root_tag:
root_tag[Entities] = TAG_List();
self.Entities = root_tag[Entities]
if not TileEntities in root_tag:
root_tag[TileEntities] = TAG_List();
self.TileEntities = root_tag[TileEntities]
if len(filter(lambda x:x['id'].value == 'LocalPlayer', root_tag[Entities])) == 0: #omen doesn't make a player entity
p = TAG_Compound()
p['id'] = TAG_String('LocalPlayer')
p['Pos'] = TAG_List([TAG_Float(0.), TAG_Float(64.), TAG_Float(0.)])
p['Rotation'] = TAG_List([TAG_Float(0.), TAG_Float(45.)])
root_tag[Entities].append(p)
#self.saveInPlace();
else:
info(u"Creating new Indev levels is not yet implemented.!")
raise ValueError, "Can't do that yet"
# self.SurroundingGroundHeight = root_tag[Environment][SurroundingGroundHeight].value
# self.SurroundingGroundType = root_tag[Environment][SurroundingGroundType].value
#         self.SurroundingWaterHeight = root_tag[Environment][SurroundingWaterHeight].value
# self.SurroundingWaterType = root_tag[Environment][SurroundingWaterType].value
# self.CloudHeight = root_tag[Environment][CloudHeight].value
# self.CloudColor = root_tag[Environment][CloudColor].value
# self.SkyColor = root_tag[Environment][SkyColor].value
# self.FogColor = root_tag[Environment][FogColor].value
# self.SkyBrightness = root_tag[Environment][SkyBrightness].value
# self.TimeOfDay = root_tag[Environment]["TimeOfDay"].value
#
#
# self.Name = self.root_tag[About][Name].value
# self.Author = self.root_tag[About][Author].value
# self.CreatedOn = self.root_tag[About][CreatedOn].value
def rotateLeft(self):
MCLevel.rotateLeft(self);
self.Data = swapaxes(self.Data, 1, 0)[:, ::-1, :]; #x=y; y=-x
torchRotation = array([0, 4, 3, 1, 2, 5,
6, 7,
8, 9, 10, 11, 12, 13, 14, 15]);
torchIndexes = (self.Blocks == self.materials.Torch.ID)
info(u"Rotating torches: {0}".format(len(torchIndexes.nonzero()[0])))
self.Data[torchIndexes] = torchRotation[self.Data[torchIndexes]]
def saveToFile(self, filename=None):
if filename == None: filename = self.filename;
if filename == None:
warn(u"Attempted to save an unnamed file in place")
return; #you fool!
self.Data <<= 4;
self.Data |= (self.BlockLight & 0xf)
self.Blocks = swapaxes(self.Blocks, 0, 2)
self.Data = swapaxes(self.Data, 0, 2)
mapTag = TAG_Compound(name=Map);
mapTag[Width] = TAG_Short(self.Width);
mapTag[Height] = TAG_Short(self.Height);
mapTag[Length] = TAG_Short(self.Length);
mapTag[Blocks] = TAG_Byte_Array(self.Blocks);
mapTag[Data] = TAG_Byte_Array(self.Data);
self.Blocks = swapaxes(self.Blocks, 0, 2)
self.Data = swapaxes(self.Data, 0, 2)
mapTag[Spawn] = TAG_List([TAG_Short(i) for i in self.Spawn])
        self.root_tag[Map] = mapTag;
#output_file = gzip.open(self.filename, "wb", compresslevel=1)
try:
os.rename(filename, filename + ".old");
except Exception, e:
pass
try:
self.root_tag.saveGzipped(filename);
except:
os.rename(filename + ".old", filename);
try: os.remove(filename + ".old");
except Exception, e:
pass
self.BlockLight = self.Data & 0xf
self.Data >>= 4
| isc |
wdurhamh/statsmodels | statsmodels/tools/tests/test_grouputils.py | 31 | 11494 | import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
def test_dummies_groups(self):
# smoke test, calls dummy_sparse under the hood
self.grouping.dummies_groups()
if len(self.grouping.group_names) > 1:
self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed in place above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
| bsd-3-clause |
wesm/statsmodels | scikits/statsmodels/examples/example_discrete.py | 1 | 2153 | """Example: scikits.statsmodels.discretemod
"""
import numpy as np
import scikits.statsmodels.api as sm
# load the data from Spector and Mazzeo (1980)
# Examples follow Greene's Econometric Analysis Ch. 21 (5th Edition).
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
# Linear Probability Model using OLS
lpm_mod = sm.OLS(spector_data.endog,spector_data.exog)
lpm_res = lpm_mod.fit()
# Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
logit_res = logit_mod.fit()
# Probit Model
probit_mod = sm.Probit(spector_data.endog, spector_data.exog)
probit_res = probit_mod.fit()
print "This example is based on Greene Table 21.1 5th Edition"
print "Linear Model"
print lpm_res.params
print "Logit Model"
print logit_res.params
print "Probit Model"
print probit_res.params
#print "Typo in Greene for Weibull, replaced with logWeibull or Gumbel"
#print "(Tentatively) Weibull Model"
#print weibull_res.params
print "Linear Model"
print lpm_res.params[:-1]
print "Logit Model"
print logit_res.margeff()
print "Probit Model"
print probit_res.margeff()
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog[:,0] = np.log(anes_exog[:,0] + .1)
anes_exog = np.column_stack((anes_exog[:,0],anes_exog[:,2],anes_exog[:,5:8]))
anes_exog = sm.add_constant(anes_exog)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
# The default method for the fit is Newton-Raphson
# However, you can use other solvers
mlogit_res = mlogit_mod.fit(method='bfgs', maxiter=100)
# The below needs a lot of iterations to get it right?
#TODO: Add a technical note on algorithms
#mlogit_res = mlogit_mod.fit(method='ncg') # this takes forever
# Poisson model
# This is similar to Cameron and Trivedi's Microeconometrics
# Table 20.5; however, the data differs slightly from theirs
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog)
poisson_mod = sm.Poisson(rand_data.endog, rand_exog)
poisson_res = poisson_mod.fit(method="newton")
print poisson_res.summary()
| bsd-3-clause |
astocko/statsmodels | statsmodels/tsa/tsatools.py | 6 | 20188 | from statsmodels.compat.python import range, lrange, lzip
import numpy as np
import numpy.lib.recfunctions as nprf
from statsmodels.tools.tools import add_constant
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
def add_trend(X, trend="c", prepend=False, has_constant='skip'):
"""
Adds a trend and/or constant to an array.
Parameters
----------
X : array-like
Original array of data.
trend : str {"c","t","ct","ctt"}
"c" add constant only
"t" add trend only
"ct" add constant and linear trend
"ctt" add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant already
exists in X. 'raise' will raise an error. 'add' will duplicate a
constant. 'skip' will return the data without change. 'skip' is the
default.
Notes
-----
Returns columns as ["ctt","ct","c"] whenever applicable. There is currently
no checking for an existing trend.
See also
--------
statsmodels.add_constant
"""
    #TODO: could be generalized for a trend of arbitrary order
trend = trend.lower()
if trend == "c": # handles structured arrays
return add_constant(X, prepend=prepend, has_constant=has_constant)
elif trend == "ct" or trend == "t":
trendorder = 1
elif trend == "ctt":
trendorder = 2
else:
raise ValueError("trend %s not understood" % trend)
X = np.asanyarray(X)
nobs = len(X)
trendarr = np.vander(np.arange(1,nobs+1, dtype=float), trendorder+1)
# put in order ctt
trendarr = np.fliplr(trendarr)
if trend == "t":
trendarr = trendarr[:,1]
if not X.dtype.names:
# check for constant
if "c" in trend and np.any(np.ptp(X, axis=0) == 0):
if has_constant == 'raise':
raise ValueError("X already contains a constant")
elif has_constant == 'add':
pass
elif has_constant == 'skip' and trend == "ct":
trendarr = trendarr[:, 1]
if not prepend:
X = np.column_stack((X, trendarr))
else:
X = np.column_stack((trendarr, X))
else:
        return_rec = X.__class__ is np.recarray
if trendorder == 1:
if trend == "ct":
dt = [('const',float),('trend',float)]
else:
dt = [('trend', float)]
elif trendorder == 2:
dt = [('const',float),('trend',float),('trend_squared', float)]
trendarr = trendarr.view(dt)
if prepend:
X = nprf.append_fields(trendarr, X.dtype.names, [X[i] for i
in X.dtype.names], usemask=False, asrecarray=return_rec)
else:
X = nprf.append_fields(X, trendarr.dtype.names, [trendarr[i] for i
in trendarr.dtype.names], usemask=False, asrecarray=return_rec)
return X
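def _example_add_trend():
    # A minimal usage sketch (illustrative only; the helper name is an
    # assumption, not part of the original module). With trend="ct" and
    # prepend=True the constant and linear trend columns come first, in
    # the documented ["ct"] ordering.
    y = np.arange(5.0)[:, None]
    out = add_trend(y, trend="ct", prepend=True)
    assert out.shape == (5, 3)  # columns: const, trend, y
    return out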
def add_lag(x, col=None, lags=1, drop=False, insert=True):
"""
Returns an array with lags included given an array.
Parameters
----------
x : array
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
    col : str, int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column containing the variable. Or `col` can
be an int of the zero-based column index. If it's a 1d array `col`
can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load()
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
    Trims the array both forward and backward, so that the length of the
    returned array is len(`X`) - lags. The lags are returned in increasing
    order, i.e., t-1, t-2, ..., t-lags.
"""
if x.dtype.names:
names = x.dtype.names
if not col and np.squeeze(x).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
elif len(names) == 1:
col = names[0]
if isinstance(col, int):
col = x.dtype.names[col]
contemp = x[col]
# make names for lags
tmp_names = [col + '_'+'L(%i)' % i for i in range(1,lags+1)]
ndlags = lagmat(contemp, maxlag=lags, trim='Both')
# get index for return
if insert is True:
ins_idx = list(names).index(col) + 1
elif insert is False:
ins_idx = len(names) + 1
else: # insert is an int
if insert > len(names):
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position",
UserWarning)
ins_idx = insert
first_names = list(names[:ins_idx])
last_names = list(names[ins_idx:])
if drop:
if col in first_names:
first_names.pop(first_names.index(col))
else:
last_names.pop(last_names.index(col))
if first_names: # only do this if x isn't "empty"
first_arr = nprf.append_fields(x[first_names][lags:],tmp_names,
ndlags.T, usemask=False)
else:
first_arr = np.zeros(len(x)-lags, dtype=lzip(tmp_names,
(x[col].dtype,)*lags))
for i,name in enumerate(tmp_names):
first_arr[name] = ndlags[:,i]
if last_names:
return nprf.append_fields(first_arr, last_names,
[x[name][lags:] for name in last_names], usemask=False)
else: # lags for last variable
return first_arr
else: # we have an ndarray
if x.ndim == 1: # make 2d if 1d
x = x[:,None]
if col is None:
col = 0
# handle negative index
if col < 0:
col = x.shape[1] + col
contemp = x[:,col]
if insert is True:
ins_idx = col + 1
elif insert is False:
ins_idx = x.shape[1]
else:
if insert < 0: # handle negative index
insert = x.shape[1] + insert + 1
if insert > x.shape[1]:
insert = x.shape[1]
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position",
UserWarning)
ins_idx = insert
ndlags = lagmat(contemp, lags, trim='Both')
first_cols = lrange(ins_idx)
last_cols = lrange(ins_idx,x.shape[1])
if drop:
if col in first_cols:
first_cols.pop(first_cols.index(col))
else:
last_cols.pop(last_cols.index(col))
return np.column_stack((x[lags:,first_cols],ndlags,
x[lags:,last_cols]))
def detrend(x, order=1, axis=0):
'''detrend an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, 1d or 2d
data, if 2d, then each row or column is independently detrended with the
same trendorder, but independent trend estimates
order : int
specifies the polynomial order of the trend, zero is constant, one is
linear trend, two is quadratic trend
axis : int
axis can be either 0, observations by rows,
or 1, observations by columns
Returns
-------
detrended data series : ndarray
The detrended series is the residual of the linear regression of the
data on the trend of given order.
'''
x = np.asarray(x)
nobs = x.shape[0]
if order == 0:
return x - np.expand_dims(x.mean(axis), axis)
else:
if x.ndim == 2 and lrange(2)[axis]==1:
x = x.T
elif x.ndim > 2:
raise NotImplementedError('x.ndim>2 is not implemented until it is needed')
#could use a polynomial, but this should work also with 2d x, but maybe not yet
trends = np.vander(np.arange(nobs).astype(float), N=order+1)
beta = np.linalg.lstsq(trends, x)[0]
resid = x - np.dot(trends, beta)
if x.ndim == 2 and lrange(2)[axis]==1:
resid = resid.T
return resid
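def _example_detrend():
    # A minimal usage sketch (illustrative only; the helper name is an
    # assumption, not part of the original module). Removing a linear
    # trend from an exactly linear series leaves residuals that are
    # numerically zero.
    x = 3.0 + 2.0 * np.arange(10)
    resid = detrend(x, order=1)
    assert np.allclose(resid, 0.0)
    return resid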
def lagmat(x, maxlag, trim='forward', original='ex'):
'''create 2d array of lags
Parameters
----------
x : array_like, 1d or 2d
data; if 2d, observation in rows and variables in columns
maxlag : int or sequence of ints
all lags from zero to maxlag are included
trim : str {'forward', 'backward', 'both', 'none'} or None
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none', None : no trimming of observations
original : str {'ex','sep','in'}
* 'ex' : drops the original array returning only the lagged values.
* 'in' : returns the original array and the lagged values as a single
array.
* 'sep' : returns a tuple (original array, lagged values). The original
array is truncated to have the same number of rows as
the returned lagmat.
Returns
-------
lagmat : 2d array
array with lagged observations
y : 2d array, optional
Only returned if original == 'sep'
Examples
--------
>>> from statsmodels.tsa.tsatools import lagmat
>>> import numpy as np
>>> X = np.arange(1,7).reshape(-1,2)
>>> lagmat(X, maxlag=2, trim="forward", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="backward", original='in')
array([[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
>>> lagmat(X, maxlag=2, trim="both", original='in')
array([[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="none", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
Notes
-----
TODO:
* allow list of lags additional to maxlag
* create varnames for columns
'''
x = np.asarray(x)
dropidx = 0
if x.ndim == 1:
x = x[:,None]
nobs, nvar = x.shape
if original in ['ex','sep']:
dropidx = nvar
if maxlag >= nobs:
raise ValueError("maxlag should be < nobs")
lm = np.zeros((nobs+maxlag, nvar*(maxlag+1)))
for k in range(0, int(maxlag+1)):
lm[maxlag-k:nobs+maxlag-k, nvar*(maxlag-k):nvar*(maxlag-k+1)] = x
if trim:
trimlower = trim.lower()
else:
trimlower = trim
if trimlower == 'none' or not trimlower:
startobs = 0
stopobs = len(lm)
elif trimlower == 'forward':
startobs = 0
stopobs = nobs+maxlag-k
elif trimlower == 'both':
startobs = maxlag
stopobs = nobs+maxlag-k
elif trimlower == 'backward':
startobs = maxlag
stopobs = len(lm)
else:
raise ValueError('trim option not valid')
if original == 'sep':
return lm[startobs:stopobs,dropidx:], x[startobs:stopobs]
else:
return lm[startobs:stopobs,dropidx:]
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward'):
'''generate lagmatrix for 2d array, columns arranged by variables
Parameters
----------
x : array_like, 2d
2d data, observation in rows and variables in columns
maxlag0 : int
        for the first variable, all lags from zero to maxlag0 are included
    maxlagex : None or int
        max lag for all other variables; all lags from zero to maxlagex are
        included (defaults to maxlag0 if None)
    dropex : int (default is 0)
        exclude the first dropex lags from the other variables; for all
        variables except the first, lags from dropex to maxlagex are included
trim : string
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none' : no trimming of observations
Returns
-------
lagmat : 2d array
array with lagged observations, columns ordered by variable
Notes
-----
very inefficient for unequal lags, just done for convenience
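    Examples
    --------
    A minimal illustration of the output shape (values are arbitrary):
    >>> import numpy as np
    >>> x = np.arange(12.).reshape(6, 2)
    >>> lagmat2ds(x, 2).shape
    (6, 6)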
'''
if maxlagex is None:
maxlagex = maxlag0
maxlag = max(maxlag0, maxlagex)
nobs, nvar = x.shape
lagsli = [lagmat(x[:,0], maxlag, trim=trim, original='in')[:,:maxlag0+1]]
for k in range(1,nvar):
lagsli.append(lagmat(x[:,k], maxlag, trim=trim, original='in')[:,dropex:maxlagex+1])
return np.column_stack(lagsli)
def vec(mat):
return mat.ravel('F')
def vech(mat):
# Gets Fortran-order
return mat.T.take(_triu_indices(len(mat)))
# tril/triu/diag, suitable for ndarray.take
def _tril_indices(n):
rows, cols = np.tril_indices(n)
return rows * n + cols
def _triu_indices(n):
rows, cols = np.triu_indices(n)
return rows * n + cols
def _diag_indices(n):
rows, cols = np.diag_indices(n)
return rows * n + cols
def unvec(v):
k = int(np.sqrt(len(v)))
assert(k * k == len(v))
return v.reshape((k, k), order='F')
def unvech(v):
# quadratic formula, correct fp error
rows = .5 * (-1 + np.sqrt(1 + 8 * len(v)))
rows = int(np.round(rows))
result = np.zeros((rows, rows))
result[np.triu_indices(rows)] = v
result = result + result.T
# divide diagonal elements by 2
result[np.diag_indices(rows)] /= 2
return result
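# Example sketch: vech and unvech are inverses on symmetric matrices.
# >>> A = np.array([[1., 2.], [2., 3.]])
# >>> np.allclose(unvech(vech(A)), A)
# True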
def duplication_matrix(n):
"""
Create duplication matrix D_n which satisfies vec(S) = D_n vech(S) for
symmetric matrix S
    Parameters
    ----------
    n : int
        dimension of the symmetric matrix S
    Returns
    -------
    D_n : ndarray
    """
    tmp = np.eye(n * (n + 1) // 2)
return np.array([unvech(x).ravel() for x in tmp]).T
def elimination_matrix(n):
"""
Create the elimination matrix L_n which satisfies vech(M) = L_n vec(M) for
any matrix M
    Parameters
    ----------
    n : int
        number of rows (and columns) of the square matrix M
    Returns
    -------
    L_n : ndarray
    """
vech_indices = vec(np.tril(np.ones((n, n))))
return np.eye(n * n)[vech_indices != 0]
def commutation_matrix(p, q):
"""
Create the commutation matrix K_{p,q} satisfying vec(A') = K_{p,q} vec(A)
Parameters
----------
p : int
q : int
Returns
-------
K : ndarray (pq x pq)
"""
K = np.eye(p * q)
indices = np.arange(p * q).reshape((p, q), order='F')
return K.take(indices.ravel(), axis=0)
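# Example sketch: verify the defining identities on small, arbitrary matrices.
# >>> A = np.arange(6.).reshape(2, 3)
# >>> K = commutation_matrix(2, 3)
# >>> np.allclose(K.dot(vec(A)), vec(A.T))
# True
# >>> S = np.array([[2., 1.], [1., 3.]])
# >>> np.allclose(duplication_matrix(2).dot(vech(S)), vec(S))
# True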
def _ar_transparams(params):
"""
    Transforms params to induce stationarity/invertibility.
Parameters
----------
params : array
The AR coefficients
    References
    ----------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
for j in range(1,len(params)):
a = newparams[j]
for kiter in range(j):
tmp[kiter] -= a * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ar_invtransparams(params):
"""
Inverse of the Jones reparameterization
Parameters
----------
params : array
The transformed AR coefficients
"""
# AR coeffs
tmp = params.copy()
for j in range(len(params)-1,0,-1):
a = params[j]
for kiter in range(j):
tmp[kiter] = (params[kiter] + a * params[j-kiter-1])/\
(1-a**2)
params[:j] = tmp[:j]
invarcoefs = -np.log((1-params)/(1+params))
return invarcoefs
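# Example sketch: the inverse transform recovers the original parameters.
# _ar_invtransparams modifies its argument in place, so pass copies.
# >>> p = np.array([0.5, -0.3])
# >>> t = _ar_transparams(p.copy())
# >>> np.allclose(_ar_invtransparams(t.copy()), p)
# True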
def _ma_transparams(params):
"""
    Transforms params to induce stationarity/invertibility.
Parameters
----------
params : array
        The MA coefficients of an (AR)MA model.
    References
    ----------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
# levinson-durbin to get macf
for j in range(1,len(params)):
b = newparams[j]
for kiter in range(j):
tmp[kiter] += b * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ma_invtransparams(macoefs):
"""
Inverse of the Jones reparameterization
Parameters
----------
    macoefs : array
The transformed MA coefficients
"""
tmp = macoefs.copy()
for j in range(len(macoefs)-1,0,-1):
b = macoefs[j]
for kiter in range(j):
tmp[kiter] = (macoefs[kiter]-b *macoefs[j-kiter-1])/(1-b**2)
macoefs[:j] = tmp[:j]
invmacoefs = -np.log((1-macoefs)/(1+macoefs))
return invmacoefs
def unintegrate_levels(x, d):
"""
Returns the successive differences needed to unintegrate the series.
Parameters
----------
x : array-like
The original series
d : int
The number of differences of the differenced series.
Returns
-------
y : array-like
The increasing differences from 0 to d-1 of the first d elements
of x.
See Also
--------
unintegrate
"""
x = x[:d]
return np.asarray([np.diff(x, d - i)[0] for i in range(d, 0, -1)])
def unintegrate(x, levels):
"""
After taking n-differences of a series, return the original series
Parameters
----------
x : array-like
The n-th differenced series
levels : list
A list of the first-value in each differenced series, for
[first-difference, second-difference, ..., n-th difference]
Returns
-------
y : array-like
The original series de-differenced
Examples
--------
>>> x = np.array([1, 3, 9., 19, 8.])
>>> levels = unintegrate_levels(x, 2)
>>> levels
array([ 1., 2.])
>>> unintegrate(np.diff(x, 2), levels)
array([ 1., 3., 9., 19., 8.])
"""
levels = list(levels)[:] # copy
if len(levels) > 1:
x0 = levels.pop(-1)
return unintegrate(np.cumsum(np.r_[x0, x]), levels)
x0 = levels[0]
return np.cumsum(np.r_[x0, x])
def freq_to_period(freq):
"""
Convert a pandas frequency to a periodicity
Parameters
----------
freq : str or offset
Frequency to convert
Returns
-------
period : int
Periodicity of freq
Notes
-----
    Annual maps to 1, quarterly maps to 4, monthly to 12, and weekly to 52.
    Business-daily ('B') frequencies are also mapped to 52 here.
"""
if not isinstance(freq, offsets.DateOffset):
freq = to_offset(freq) # go ahead and standardize
freq = freq.rule_code.upper()
if freq == 'A' or freq.startswith(('A-', 'AS-')):
return 1
elif freq == 'Q' or freq.startswith(('Q-', 'QS-')):
return 4
elif freq == 'M' or freq.startswith(('M-', 'MS')):
return 12
elif freq == 'B' or freq == 'W' or freq.startswith('W-'):
return 52
else: # pragma : no cover
raise ValueError("freq {} not understood. Please report if you "
"think this in error.".format(freq))
__all__ = ['lagmat', 'lagmat2ds','add_trend', 'duplication_matrix',
'elimination_matrix', 'commutation_matrix',
'vec', 'vech', 'unvec', 'unvech']
if __name__ == '__main__':
# sanity check, mainly for imports
x = np.random.normal(size=(100,2))
tmp = lagmat(x,2)
tmp = lagmat2ds(x,2)
# grangercausalitytests(x, 2)
| bsd-3-clause |
ray-project/ray | rllib/algorithms/impala/vtrace_torch.py | 1 | 14451 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch version of the functions to compute V-trace off-policy actor critic
targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
In addition to the original paper's code, changes have been made
to support MultiDiscrete action spaces. The behaviour_policy_logits,
target_policy_logits and actions parameters of the entry point
multi_from_logits accept lists of tensors instead of just
tensors.
"""
from ray.rllib.algorithms.impala.vtrace_tf import VTraceFromLogitsReturns, VTraceReturns
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.utils import force_list
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
torch, nn = try_import_torch()
def log_probs_from_logits_and_actions(
policy_logits, actions, dist_class=TorchCategorical, model=None
):
return multi_log_probs_from_logits_and_actions(
[policy_logits], [actions], dist_class, model
)[0]
def multi_log_probs_from_logits_and_actions(policy_logits, actions, dist_class, model):
"""Computes action log-probs from policy logits and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes [T, B, ACTION_SPACE[0]], ...,
[T, B, ACTION_SPACE[-1]] with un-normalized log-probabilities
parameterizing a softmax policy.
actions: A list with length of ACTION_SPACE of tensors of shapes
[T, B, ...], ..., [T, B, ...]
with actions.
dist_class: Python class of the action distribution.
Returns:
A list with length of ACTION_SPACE of float32 tensors of shapes
[T, B], ..., [T, B] corresponding to the sampling log probability
of the chosen action w.r.t. the policy.
"""
log_probs = []
for i in range(len(policy_logits)):
p_shape = policy_logits[i].shape
a_shape = actions[i].shape
policy_logits_flat = torch.reshape(policy_logits[i], (-1,) + tuple(p_shape[2:]))
actions_flat = torch.reshape(actions[i], (-1,) + tuple(a_shape[2:]))
log_probs.append(
torch.reshape(
dist_class(policy_logits_flat, model).logp(actions_flat), a_shape[:2]
)
)
return log_probs
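# Example sketch (assumes PyTorch is available): for T=2 timesteps, B=3 batch
# entries and a single Discrete(4) action space, the returned log-probs have
# shape [T, B].
#
#   logits = torch.randn(2, 3, 4)
#   actions = torch.randint(0, 4, (2, 3))
#   logp = log_probs_from_logits_and_actions(logits, actions)
#   assert logp.shape == (2, 3)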
def from_logits(
behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
dist_class=TorchCategorical,
model=None,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""multi_from_logits wrapper used only for tests"""
res = multi_from_logits(
[behaviour_policy_logits],
[target_policy_logits],
[actions],
discounts,
rewards,
values,
bootstrap_value,
dist_class,
model,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
assert len(res.behaviour_action_log_probs) == 1
assert len(res.target_action_log_probs) == 1
return VTraceFromLogitsReturns(
vs=res.vs,
pg_advantages=res.pg_advantages,
log_rhos=res.log_rhos,
behaviour_action_log_probs=res.behaviour_action_log_probs[0],
target_action_log_probs=res.target_action_log_probs[0],
)
def multi_from_logits(
behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
dist_class,
model,
behaviour_action_log_probs=None,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
r"""V-trace for softmax policies.
Calculates V-trace actor critic targets for softmax polices as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
Target policy refers to the policy we are interested in improving and
behaviour policy refers to the policy that generated the given
rewards and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
behaviour_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes [T, B, ACTION_SPACE[0]], ...,
[T, B, ACTION_SPACE[-1]] with un-normalized log-probabilities
parameterizing the softmax behavior policy.
target_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes [T, B, ACTION_SPACE[0]], ...,
[T, B, ACTION_SPACE[-1]] with un-normalized log-probabilities
parameterizing the softmax target policy.
actions: A list with length of ACTION_SPACE of tensors of shapes
[T, B, ...], ..., [T, B, ...]
with actions sampled from the behavior policy.
discounts: A float32 tensor of shape [T, B] with the discount
encountered when following the behavior policy.
rewards: A float32 tensor of shape [T, B] with the rewards generated by
following the behavior policy.
values: A float32 tensor of shape [T, B] with the value function
estimates wrt. the target policy.
bootstrap_value: A float32 of shape [B] with the value function
estimate at time T.
dist_class: action distribution class for the logits.
model: backing ModelV2 instance
behaviour_action_log_probs: Precalculated values of the behavior
actions.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold
for importance weights (rho) when calculating the baseline targets
(vs). rho^bar in the paper.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping
threshold on rho_s in:
\rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
Returns:
A `VTraceFromLogitsReturns` namedtuple with the following fields:
vs: A float32 tensor of shape [T, B]. Can be used as target to train a
baseline (V(x_t) - vs_t)^2.
pg_advantages: A float 32 tensor of shape [T, B]. Can be used as an
estimate of the advantage in the calculation of policy gradients.
log_rhos: A float32 tensor of shape [T, B] containing the log
importance sampling weights (log rhos).
behaviour_action_log_probs: A float32 tensor of shape [T, B] containing
behaviour policy action log probabilities (log \mu(a_t)).
target_action_log_probs: A float32 tensor of shape [T, B] containing
            target policy action log probabilities (log \pi(a_t)).
"""
behaviour_policy_logits = convert_to_torch_tensor(
behaviour_policy_logits, device="cpu"
)
target_policy_logits = convert_to_torch_tensor(target_policy_logits, device="cpu")
actions = convert_to_torch_tensor(actions, device="cpu")
# Make sure tensor ranks are as expected.
# The rest will be checked by from_action_log_probs.
for i in range(len(behaviour_policy_logits)):
assert len(behaviour_policy_logits[i].size()) == 3
assert len(target_policy_logits[i].size()) == 3
target_action_log_probs = multi_log_probs_from_logits_and_actions(
target_policy_logits, actions, dist_class, model
)
if len(behaviour_policy_logits) > 1 or behaviour_action_log_probs is None:
# can't use precalculated values, recompute them. Note that
# recomputing won't work well for autoregressive action dists
# which may have variables not captured by 'logits'
behaviour_action_log_probs = multi_log_probs_from_logits_and_actions(
behaviour_policy_logits, actions, dist_class, model
)
behaviour_action_log_probs = convert_to_torch_tensor(
behaviour_action_log_probs, device="cpu"
)
behaviour_action_log_probs = force_list(behaviour_action_log_probs)
log_rhos = get_log_rhos(target_action_log_probs, behaviour_action_log_probs)
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behaviour_action_log_probs=behaviour_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict()
)
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
r"""V-trace from log importance weights.
Calculates V-trace actor critic targets as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size. This code
also supports the case where all tensors have the same number of additional
dimensions, e.g., `rewards` is [T, B, C], `values` is [T, B, C],
`bootstrap_value` is [B, C].
Args:
log_rhos: A float32 tensor of shape [T, B] representing the log
importance sampling weights, i.e.
log(target_policy(a) / behaviour_policy(a)). V-trace performs
operations on rhos in log-space for numerical stability.
discounts: A float32 tensor of shape [T, B] with discounts encountered
when following the behaviour policy.
rewards: A float32 tensor of shape [T, B] containing rewards generated
by following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function
estimates wrt. the target policy.
bootstrap_value: A float32 of shape [B] with the value function
estimate at time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold
for importance weights (rho) when calculating the baseline targets
(vs). rho^bar in the paper. If None, no clipping is applied.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping
threshold on rho_s in
\rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
If None, no clipping is applied.
Returns:
A VTraceReturns namedtuple (vs, pg_advantages) where:
vs: A float32 tensor of shape [T, B]. Can be used as target to
train a baseline (V(x_t) - vs_t)^2.
pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
advantage in the calculation of policy gradients.
"""
log_rhos = convert_to_torch_tensor(log_rhos, device="cpu")
discounts = convert_to_torch_tensor(discounts, device="cpu")
rewards = convert_to_torch_tensor(rewards, device="cpu")
values = convert_to_torch_tensor(values, device="cpu")
bootstrap_value = convert_to_torch_tensor(bootstrap_value, device="cpu")
# Make sure tensor ranks are consistent.
rho_rank = len(log_rhos.size()) # Usually 2.
assert rho_rank == len(values.size())
assert rho_rank - 1 == len(bootstrap_value.size()), "must have rank {}".format(
rho_rank - 1
)
assert rho_rank == len(discounts.size())
assert rho_rank == len(rewards.size())
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp_max(rhos, clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp_max(rhos, 1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
vs_minus_v_xs = [torch.zeros_like(bootstrap_value)]
for i in reversed(range(len(discounts))):
discount_t, c_t, delta_t = discounts[i], cs[i], deltas[i]
vs_minus_v_xs.append(delta_t + discount_t * c_t * vs_minus_v_xs[-1])
vs_minus_v_xs = torch.stack(vs_minus_v_xs[1:])
# Reverse the results back to original order.
vs_minus_v_xs = torch.flip(vs_minus_v_xs, dims=[0])
# Add V(x_s) to get v_s.
vs = vs_minus_v_xs + values
# Advantage for policy gradient.
vs_t_plus_1 = torch.cat([vs[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp_max(rhos, clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs.detach(), pg_advantages=pg_advantages.detach())
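# Example sketch (assumes PyTorch): with zero log-rhos (on-policy data) all
# importance weights equal 1 and the v-trace targets reduce to n-step
# bootstrapped returns.
#
#   T, B = 3, 2
#   out = from_importance_weights(
#       log_rhos=torch.zeros(T, B),
#       discounts=torch.full((T, B), 0.99),
#       rewards=torch.ones(T, B),
#       values=torch.zeros(T, B),
#       bootstrap_value=torch.zeros(B))
#   assert out.vs.shape == (T, B)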
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
"""With the selected log_probs for multi-discrete actions of behavior
and target policies we compute the log_rhos for calculating the vtrace."""
t = torch.stack(target_action_log_probs)
b = torch.stack(behaviour_action_log_probs)
log_rhos = torch.sum(t - b, dim=0)
return log_rhos
| apache-2.0 |
theoryno3/scikit-learn | sklearn/cross_decomposition/cca_.py | 23 | 3087 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
pianomania/scikit-learn | examples/linear_model/plot_theilsen.py | 98 | 3846 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause |
datamade/pyhacrf | pyhacrf/feature_extraction.py | 1 | 9183 | # Authors: Dirko Coetsee
# License: 3-clause BSD
""" Implements feature extraction methods to use with HACRF models. """
import numpy as np
import functools
import itertools
class PairFeatureExtractor(object):
"""Extract features from sequence pairs.
    For each feature, a grid is constructed for a sequence pair. The
features are stacked, producing a 3 dimensional matrix of
dimensions:
(length of sequence 1) X (length of sequence 2) X (number of features)
For example, a 'beginning' character feature grid for the sequences,
'kaas' and 'cheese' could look like this.
c h e e s e
k 1 1 1 1 1 1
a 1 0 0 0 0 0
a 1 0 0 0 0 0
s 1 0 0 0 0 0
These grids are made from two different types of feature
functions: real and sparse.
Real features are functions of the form:
def some_feature_function(array1, array2):
...
return feature_grid
    Given two sequences, array1 and array2, return a numpy.array with dimensions
(length of array1) X (length of array2).
For performance reasons, we take advantage of numpy broadcasting, and
array1 is a column array and array2 is a row array.
For a 'matching character' feature between 'kaas' and 'cheese', the
sequences are transformed and then we use broadcasting
> array1 = numpy.array([['k'],
['a'],
['a'],
['s']])
    > array2 = numpy.array([['c', 'h', 'e', 'e', 's', 'e']])
> array1 == array2
numpy.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
When writing you own real feature functions, you can assume that
the arrays will come in with the right shape.
Sparse feature functions look similar:
def some_feature_function(i, j, s1, s2):
...
return some_index, total_vector_length
but they always return two ints. The first is the index of the
element that should be 1 and the second is the total length of
vector. So for example if (4, 5) is returned, then the feature
vector [0, 0, 0, 0, 1] is constructed.
Parameters
----------
real: list: optional (default=[])
List of functions of the form
        def some_feature_function(array1, array2):
            ...
            return feature_grid
sparse: list: optional (default=[])
List of functions of the form
def some_feature_function(i, j, s1, s2):
...
return some_index, total_vector_length
"""
def __init__(self, real=None, sparse=None):
self._binary_features = []
if real:
self._binary_features = real
self._sparse_features = []
if sparse:
self._sparse_features = sparse
self.K = (len(self._binary_features)
+ sum(num_feats for _, num_feats in self._sparse_features))
def fit_transform(self, raw_X, y=None):
"""Like transform. Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
Parameters
----------
raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n.
y : (ignored)
Returns
-------
X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the
length of sequence2_n, and K is the number of features.
Feature matrix list, for use with estimators or further transformers.
"""
return self.transform(raw_X)
def transform(self, raw_X, y=None):
"""Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
Parameters
----------
raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n.
y : (ignored)
Returns
-------
X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the
length of sequence2_n, and K is the number of features.
Feature matrix list, for use with estimators or further transformers.
"""
return [self._extract_features(self._to_array(sequence1).T,
self._to_array(sequence2))
for sequence1, sequence2 in raw_X]
def _extract_features(self, array1, array2):
""" Helper to extract features for one data point. """
feature_array = np.zeros((array1.size, array2.size, self.K),
dtype='float64')
for k, feature_function in enumerate(self._binary_features):
feature_array[..., k] = feature_function(array1, array2)
if self._sparse_features:
array1 = array1.T[0]
array2 = array2[0]
n_binary_features = len(self._binary_features)
for i, j in np.ndindex(array1.size, array2.size):
k = n_binary_features
for feature_function, num_features in self._sparse_features:
feature_array[i, j, k + feature_function(i, j, array1, array2)] = 1.0
k += num_features
return feature_array
def _to_array(self, sequence):
return np.array(tuple(sequence), ndmin=2)
class StringPairFeatureExtractor(PairFeatureExtractor):
""" Extract features from sequence pairs.
A grid is constructed for each sequence pair, for example for ("kaas", "cheese"):
s * . . . @ .
a * . . . . .
a * . . . . .
k * * * * * *
c h e e s e
For each element in the grid, a feature vector is constructed. The elements in the feature
vector are determined by which features are active at that position in the grid. So for the
example above, the 'match' feature will be 0 in every vector in every position except the
position indicated with '@', where it will be 1. The 'start' feature will be 1 in all the
positions with '*' and 0 everywhere else.
Parameters
----------
bias: float: optional (default=1.0)
A bias term that is always added to every position in the lattice.
start: boolean: optional
Binary feature that activates at the start of either sequence.
end: boolean: optional
Binary feature that activates at the end of either sequence.
match: boolean: optional
Binary feature that activates when elements at a position are equal.
numeric: boolean, optional
Binary feature that activates when all elements at a position are numerical.
transition: boolean, optional
Adds binary features for pairs of (lower case) input characters.
"""
# Constants
    CHARACTERS = 'abcdefghijklmnopqrstuvwxyz0123456789,./;\'\\-=<>?:"|_+!@#$%^&*() '  # literal backslash is escaped
def __init__(self, bias=1.0, start=False, end=False, match=False, numeric=False, transition=False):
# TODO: For longer strings, tokenize and use Levenshtein
# distance up until a lattice position. Other (possibly)
# useful features might be whether characters are consonant or
# vowel, punctuation, case.
binary_features_active = [True, start, end, match, numeric]
binary_features = [functools.partial(biases, bias=bias),
starts,
ends,
matches,
digits]
self._binary_features = [feature
for feature, active
in zip(binary_features,
binary_features_active)
if active]
self._sparse_features = []
if transition:
characters_to_index = {character: index for index, character in enumerate(self.CHARACTERS)}
curried_charIndex = functools.partial(charIndex,
char2index = characters_to_index)
self._sparse_features.append((curried_charIndex,
len(characters_to_index) ** 2))
self.K = (len(self._binary_features)
+ sum(num_feats for _, num_feats in self._sparse_features))
def _to_array(self, sequence):
return np.asarray(tuple(sequence)).reshape(1, -1)
def charIndex(i, j, s1, s2, char2index=None) :
char_i, char_j = s1[i].lower(), s2[j].lower()
index = char2index[char_j] + char2index[char_i] * len(char2index)
return index
def biases(s1, s2, bias=1.0) :
return np.full((s1.size, s2.size), bias)
def starts(s1, s2) :
M = np.zeros((s1.size, s2.size))
M[0,...] = 1
M[...,0] = 1
return M
def ends(s1, s2) :
M = np.zeros((s1.size, s2.size))
M[(s1.size-1),...] = 1
M[...,(s2.size-1)] = 1
return M
def matches(s1, s2) :
return (s1 == s2)
def digits(s1, s2) :
return np.char.isdigit(s1) & np.char.isdigit(s2)
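# Example sketch of typical usage (the sequence pair is illustrative):
#
#   fe = StringPairFeatureExtractor(match=True)
#   X = fe.fit_transform([('kaas', 'cheese')])
#   assert X[0].shape == (4, 6, 2)  # one bias plane and one match plane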
| bsd-3-clause |
cainiaocome/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 295 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
glennq/scikit-learn | examples/manifold/plot_mds.py | 85 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
ray-project/ray | rllib/algorithms/dreamer/utils.py | 1 | 3095 | import numpy as np
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
# Custom initialization for different types of layers
if torch:
class Linear(nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
class Conv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
class ConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.zeros_(self.bias)
class GRUCell(nn.GRUCell):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_ih)
nn.init.orthogonal_(self.weight_hh)
nn.init.zeros_(self.bias_ih)
nn.init.zeros_(self.bias_hh)
# Custom Tanh Bijector due to big gradients through Dreamer Actor
class TanhBijector(torch.distributions.Transform):
def __init__(self):
super().__init__()
self.bijective = True
self.domain = torch.distributions.constraints.real
self.codomain = torch.distributions.constraints.interval(-1.0, 1.0)
def atanh(self, x):
return 0.5 * torch.log((1 + x) / (1 - x))
def sign(self):
return 1.0
def _call(self, x):
return torch.tanh(x)
def _inverse(self, y):
y = torch.where(
(torch.abs(y) <= 1.0), torch.clamp(y, -0.99999997, 0.99999997), y
)
y = self.atanh(y)
return y
def log_abs_det_jacobian(self, x, y):
return 2.0 * (np.log(2) - x - nn.functional.softplus(-2.0 * x))
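# Example sketch (assumes PyTorch): _inverse undoes _call away from the
# clamping boundary.
#
#   b = TanhBijector()
#   x = torch.tensor([0.5])
#   assert torch.allclose(b._inverse(b._call(x)), x)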
# Modified from https://github.com/juliusfrost/dreamer-pytorch
class FreezeParameters:
def __init__(self, parameters):
self.parameters = parameters
self.param_states = [p.requires_grad for p in self.parameters]
def __enter__(self):
for param in self.parameters:
param.requires_grad = False
def __exit__(self, exc_type, exc_val, exc_tb):
for i, param in enumerate(self.parameters):
param.requires_grad = self.param_states[i]
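# Example sketch ('world_model' and 'compute_actor_loss' are hypothetical):
# gradients do not accumulate in the frozen parameters inside the context.
#
#   with FreezeParameters(list(world_model.parameters())):
#       actor_loss = compute_actor_loss()
#       actor_loss.backward()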
def batchify_states(states_list, batch_size, device=None):
"""
    Expand each state tensor in states_list along a new batch dimension of
    size batch_size, optionally moving the results to the given device.
"""
state_batches = [s[None, :].expand(batch_size, -1) for s in states_list]
if device is not None:
state_batches = [s.to(device) for s in state_batches]
return state_batches
| apache-2.0 |
snap-stanford/ogb | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/infer.py | 1 | 13505 | # -*- coding: utf-8 -*-
#
# train.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import argparse
import numpy as np
import dgl.backend as F
backend = os.environ.get('DGLBACKEND', 'pytorch')
from .general_models import InferModel
if backend.lower() == 'mxnet':
from .mxnet.tensor_models import logsigmoid
from .mxnet.tensor_models import none
from .mxnet.tensor_models import get_dev
from .mxnet.tensor_models import cosine_dist
from .mxnet.tensor_models import l2_dist
from .mxnet.tensor_models import l1_dist
from .mxnet.tensor_models import dot_dist
from .mxnet.tensor_models import extended_jaccard_dist
from .mxnet.tensor_models import floor_divide
DEFAULT_INFER_BATCHSIZE = 256
else:
from .pytorch.tensor_models import logsigmoid
from .pytorch.tensor_models import none
from .pytorch.tensor_models import get_dev
from .pytorch.tensor_models import cosine_dist
from .pytorch.tensor_models import l2_dist
from .pytorch.tensor_models import l1_dist
from .pytorch.tensor_models import dot_dist
from .pytorch.tensor_models import extended_jaccard_dist
from .pytorch.tensor_models import floor_divide
DEFAULT_INFER_BATCHSIZE = 1024
class ScoreInfer(object):
""" Calculate score of triplet (h, r, t) based on pretained KG embeddings
using specified score_function
Parameters
---------
device : int
Device to run the inference, -1 for CPU
config : dict
Containing KG model information
model_path : str
path storing the model (pretrained embeddings)
    sfunc : str
        What kind of score is used,
        none: score = $x$
        logsigmoid: score = $log(sigmoid(x))$
"""
def __init__(self, device, config, model_path, sfunc='none'):
assert sfunc in ['none', 'logsigmoid'], 'score function should be none or logsigmoid'
self.device = 'cpu' if device < 0 else device
self.config = config
self.model_path = model_path
self.sfunc = sfunc
if sfunc == 'none':
self.score_func = none
else:
self.score_func = logsigmoid
def load_model(self):
config = self.config
model_path = self.model_path
# for none score func, use 0.
# for logsigmoid use original gamma to make the score closer to 0.
gamma=config['gamma'] if self.sfunc == 'logsigmoid' else 0.0
model = InferModel(device=self.device,
model_name=config['model'],
hidden_dim=config['emb_size'],
double_entity_emb=config['double_ent'],
double_relation_emb=config['double_rel'],
gamma=gamma)
dataset = config['dataset']
model.load_emb(model_path, dataset)
self.model = model
def topK(self, head=None, rel=None, tail=None, exec_mode='all', k=10):
if head is None:
head = F.arange(0, self.model.num_entity)
else:
head = F.tensor(head)
if rel is None:
rel = F.arange(0, self.model.num_rel)
else:
rel = F.tensor(rel)
if tail is None:
tail = F.arange(0, self.model.num_entity)
else:
tail = F.tensor(tail)
num_head = F.shape(head)[0]
num_rel = F.shape(rel)[0]
num_tail = F.shape(tail)[0]
if exec_mode == 'triplet_wise':
result = []
assert num_head == num_rel, \
                'For triplet wise execution mode, head, relation and tail lists should have same length'
assert num_head == num_tail, \
                'For triplet wise execution mode, head, relation and tail lists should have same length'
raw_score = self.model.score(head, rel, tail, triplet_wise=True)
score = self.score_func(raw_score)
idx = F.arange(0, num_head)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
result.append((F.asnumpy(head[idx]),
F.asnumpy(rel[idx]),
F.asnumpy(tail[idx]),
F.asnumpy(score)))
elif exec_mode == 'all':
result = []
raw_score = self.model.score(head, rel, tail)
score = self.score_func(raw_score)
idx = F.arange(0, num_head * num_rel * num_tail)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
tail_idx = idx % num_tail
idx = floor_divide(idx, num_tail)
rel_idx = idx % num_rel
idx = floor_divide(idx, num_rel)
head_idx = idx % num_head
result.append((F.asnumpy(head[head_idx]),
F.asnumpy(rel[rel_idx]),
F.asnumpy(tail[tail_idx]),
F.asnumpy(score)))
elif exec_mode == 'batch_head':
result = []
for i in range(num_head):
raw_score = self.model.score(F.unsqueeze(head[i], 0), rel, tail)
score = self.score_func(raw_score)
idx = F.arange(0, num_rel * num_tail)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
tail_idx = idx % num_tail
idx = floor_divide(idx, num_tail)
rel_idx = idx % num_rel
result.append((np.full((k,), F.asnumpy(head[i])),
F.asnumpy(rel[rel_idx]),
F.asnumpy(tail[tail_idx]),
F.asnumpy(score)))
elif exec_mode == 'batch_rel':
result = []
for i in range(num_rel):
raw_score = self.model.score(head, F.unsqueeze(rel[i], 0), tail)
score = self.score_func(raw_score)
idx = F.arange(0, num_head * num_tail)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
tail_idx = idx % num_tail
idx = floor_divide(idx, num_tail)
head_idx = idx % num_head
result.append((F.asnumpy(head[head_idx]),
np.full((k,), F.asnumpy(rel[i])),
F.asnumpy(tail[tail_idx]),
F.asnumpy(score)))
elif exec_mode == 'batch_tail':
result = []
for i in range(num_tail):
raw_score = self.model.score(head, rel, F.unsqueeze(tail[i], 0))
score = self.score_func(raw_score)
idx = F.arange(0, num_head * num_rel)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
rel_idx = idx % num_rel
idx = floor_divide(idx, num_rel)
head_idx = idx % num_head
result.append((F.asnumpy(head[head_idx]),
F.asnumpy(rel[rel_idx]),
np.full((k,), F.asnumpy(tail[i])),
F.asnumpy(score)))
else:
            assert False, 'unknown execution mode type {}'.format(exec_mode)
return result
class EmbSimInfer():
""" Calculate simularity of entity/relation embeddings based on pretained KG embeddings
Parameters
---------
device : int
Device to run the inference, -1 for CPU
emb_file : dict
Containing embedding information
sfunc : str
What kind of score is used,
cosine: score = $\frac{x \cdot y}{||x||_2||y||_2}$
l2: score = $-||x - y||_2$
l1: score = $-||x - y||_1$
dot: score = $x \cdot y$
ext_jaccard: score = $\frac{x \cdot y}{||x||_{2}^{2} + ||y||_{2}^{2} - x \cdot y}$
"""
def __init__(self, device, emb_file, sfunc='cosine', batch_size=DEFAULT_INFER_BATCHSIZE):
self.device = get_dev(device)
self.emb_file = emb_file
self.sfunc = sfunc
if sfunc == 'cosine':
self.sim_func = cosine_dist
elif sfunc == 'l2':
self.sim_func = l2_dist
elif sfunc == 'l1':
self.sim_func = l1_dist
elif sfunc == 'dot':
self.sim_func = dot_dist
elif sfunc == 'ext_jaccard':
self.sim_func = extended_jaccard_dist
self.batch_size = batch_size
def load_emb(self):
self.emb = F.tensor(np.load(self.emb_file))
def topK(self, head=None, tail=None, bcast=False, pair_ws=False, k=10):
if head is None:
head = F.arange(0, self.emb.shape[0])
else:
head = F.tensor(head)
if tail is None:
tail = F.arange(0, self.emb.shape[0])
else:
tail = F.tensor(tail)
head_emb = self.emb[head]
tail_emb = self.emb[tail]
if pair_ws is True:
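            # NOTE: pairwise mode scores head[i] against tail[i]; the head
            # and tail index lists are therefore assumed to have equal length.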
result = []
batch_size = self.batch_size
# chunked cal score
score = []
num_head = head.shape[0]
num_tail = tail.shape[0]
for i in range((num_head + batch_size - 1) // batch_size):
sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
if (i + 1) * batch_size < num_head \
else num_head]
sh_emb = F.copy_to(sh_emb, self.device)
st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \
if (i + 1) * batch_size < num_head \
else num_head]
st_emb = F.copy_to(st_emb, self.device)
score.append(F.copy_to(self.sim_func(sh_emb, st_emb, pw=True), F.cpu()))
score = F.cat(score, dim=0)
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
result.append((F.asnumpy(head[sidx]),
F.asnumpy(tail[sidx]),
F.asnumpy(score)))
else:
num_head = head.shape[0]
num_tail = tail.shape[0]
batch_size = self.batch_size
# chunked cal score
score = []
for i in range((num_head + batch_size - 1) // batch_size):
sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
if (i + 1) * batch_size < num_head \
else num_head]
sh_emb = F.copy_to(sh_emb, self.device)
s_score = []
for j in range((num_tail + batch_size - 1) // batch_size):
st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \
if (j + 1) * batch_size < num_tail \
else num_tail]
st_emb = F.copy_to(st_emb, self.device)
s_score.append(F.copy_to(self.sim_func(sh_emb, st_emb), F.cpu()))
score.append(F.cat(s_score, dim=1))
score = F.cat(score, dim=0)
if bcast is False:
result = []
idx = F.arange(0, num_head * num_tail)
score = F.reshape(score, (num_head * num_tail, ))
sidx = F.argsort(score, dim=0, descending=True)
sidx = sidx[:k]
score = score[sidx]
idx = idx[sidx]
tail_idx = idx % num_tail
idx = floor_divide(idx, num_tail)
head_idx = idx % num_head
result.append((F.asnumpy(head[head_idx]),
F.asnumpy(tail[tail_idx]),
F.asnumpy(score)))
else: # bcast at head
result = []
for i in range(num_head):
i_score = score[i]
sidx = F.argsort(i_score, dim=0, descending=True)
idx = F.arange(0, num_tail)
i_idx = sidx[:k]
i_score = i_score[i_idx]
idx = idx[i_idx]
result.append((np.full((k,), F.asnumpy(head[i])),
F.asnumpy(tail[idx]),
F.asnumpy(i_score)))
return result
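# Example sketch (the embedding file name and indices are illustrative):
#
#   sim = EmbSimInfer(-1, 'entity_emb.npy', sfunc='cosine')
#   sim.load_emb()
#   topk = sim.topK(head=[0], bcast=True, k=5)  # 5 nearest entities to entity 0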
| mit |
pianomania/scikit-learn | sklearn/preprocessing/__init__.py | 265 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/linear_model/coordinate_descent.py | 42 | 73973 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
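# Example sketch (values are illustrative): _alpha_grid returns a decreasing
# grid of alphas.
#
#   X = np.array([[1., 2.], [3., 4.], [5., 7.]])
#   y = np.array([1., 2., 3.])
#   alphas = _alpha_grid(X, y, n_alphas=5)
#   assert alphas.shape == (5,) and alphas[0] > alphas[-1]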
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True.)
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear interpolation
    can be used to retrieve model coefficients between the values
    output by ``lars_path``.
Examples
    --------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
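
    Examples
    --------
    A minimal sketch (only the output shapes, which are deterministic, are
    shown; the fitted values depend on the data):

    >>> import numpy as np
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    >>> alphas.shape, coefs.shape, gaps.shape
    ((5,), (2, 5), (5,))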
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit an intercept: it has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
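        # The CD solver minimizes the objective scaled by n_samples, so the
        # 1 / n_samples factor of the documented loss is folded into the
        # regularization strengths here.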
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
            warnings.warn('Objective did not converge. You might want '
                          'to increase the number of iterations.',
                          ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
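
    For example, ``a = 0.6`` (L1) and ``b = 0.4`` (L2) correspond to
    ``alpha = 1.0`` and ``l1_ratio = 0.6``.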
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alphas.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
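
    Examples
    --------
    A minimal usage sketch (only deterministic attributes are shown, since
    the fitted values depend on the solver's numerical path):

    >>> from sklearn.linear_model import ElasticNet
    >>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    >>> clf.coef_.shape
    (2,)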
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
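    # residues has shape (n_samples, n_order, n_alphas); adding the
    # intercepts (shape (n_order, n_alphas)) broadcasts over the samples axis.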
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
        # The double loop over l1_ratio values and CV folds is flattened
        # into a single generator so that both can be dispatched in parallel
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
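        # Average over the CV folds (axis 1), giving one MSE curve of
        # length n_alphas per l1_ratio.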
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
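
    Examples
    --------
    A minimal sketch (only deterministic shapes are shown; the selected
    ``alpha_`` depends on the data and the fold layout):

    >>> import numpy as np
    >>> from sklearn.linear_model import LassoCV
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> y = np.array([0., 1., 2., 3.])
    >>> clf = LassoCV(n_alphas=10).fit(X, y)
    >>> clf.alphas_.shape
    (10,)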
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alphas, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
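
    Examples
    --------
    A minimal sketch (only the deterministic shape of the MSE path is
    shown; the selected parameters depend on the data):

    >>> import numpy as np
    >>> from sklearn.linear_model import ElasticNetCV
    >>> X = np.arange(20, dtype=np.float64).reshape(10, 2)
    >>> y = 3. * X[:, 0] - 2. * X[:, 1]
    >>> clf = ElasticNetCV(l1_ratio=[.5, .9], n_alphas=5).fit(X, y)
    >>> clf.mse_path_.shape
    (2, 5, 3)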
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge. You might want '
                          'to increase the number of iterations.',
                          ConvergenceWarning)
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
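
    Examples
    --------
    A minimal sketch (only deterministic shapes are shown):

    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLassoCV(n_alphas=4)
    >>> clf = clf.fit([[0., 0.], [1., 1.], [2., 2.], [3., 3.]],
    ...               [[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> clf.coef_.shape
    (2, 2)
    >>> clf.alphas_.shape
    (4,)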
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
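# A minimal usage sketch (synthetic data, an illustration rather than part of
# the class's official docs):
#
# from sklearn.linear_model import MultiTaskLassoCV
# import numpy as np
# X = np.random.randn(50, 10)
# Y = np.random.randn(50, 3)
# model = MultiTaskLassoCV(cv=3).fit(X, Y)
# print(model.alpha_, model.coef_.shape)  # chosen alpha and (n_tasks, n_features)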
| bsd-3-clause |
theoryno3/scikit-learn | benchmarks/bench_lasso.py | 295 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LassoLars
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
GoogleCloudPlatform/cloudml-samples | pytorch/containers/custom_container/task.py | 1 | 4919 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
import data_utils
import model
def train(net, train_loader, optimizer, epoch):
"""Create the training loop"""
net.train()
criterion = nn.BCELoss()
running_loss = 0.0
for batch_index, data in enumerate(train_loader):
features = data['features']
target = data['target']
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(features)
loss = criterion(outputs, target)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if batch_index % 6 == 5: # print every 6 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch, batch_index + 1, running_loss / 6))
running_loss = 0.0
def test(net, test_loader):
"""Test the DNN"""
net.eval()
criterion = nn.BCELoss() # https://pytorch.org/docs/stable/nn.html#bceloss
test_loss = 0
correct = 0
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
features = data['features']
target = data['target']
output = net(features)
# Sum up batch loss on the raw probabilities, then binarize a
# copy of the output for the accuracy count (apply_ is in-place)
test_loss += criterion(output, target).item()
pred = output.clone().apply_(lambda x: 0.0 if x < 0.5 else 1.0)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
correct,
(len(test_loader) * test_loader.batch_size),
100. * correct / (len(test_loader) * test_loader.batch_size)))
def train_model(args):
"""Load the data, train the model, test the model, export / save the model
"""
torch.manual_seed(args.seed)
# Open our dataset
train_loader, test_loader = data_utils.load_data(args.test_split,
args.batch_size)
# Create the model
net = model.SonarDNN().double()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=args.momentum, nesterov=False)
# Train / Test the model
for epoch in range(1, args.epochs + 1):
train(net, train_loader, optimizer, epoch)
test(net, test_loader)
# Export the trained model
torch.save(net.state_dict(), args.model_name)
if args.model_dir:
# Save the model to GCS
data_utils.save_model(args.model_dir, args.model_name)
def get_args():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser(description='PyTorch Sonar Example')
parser.add_argument('--model-dir',
type=str,
help='Where to save the model')
parser.add_argument('--model-name',
type=str,
default='sonar_model',
help='What to name the saved model file')
parser.add_argument('--batch-size',
type=int,
default=4,
help='input batch size for training (default: 4)')
parser.add_argument('--test-split',
type=float,
default=0.2,
help='split size for training / testing dataset')
parser.add_argument('--epochs',
type=int,
default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr',
type=float,
default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum',
type=float,
default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed',
type=int,
default=42,
help='random seed (default: 42)')
args = parser.parse_args()
return args
def main():
args = get_args()
train_model(args)
if __name__ == '__main__':
main()
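# Example local invocation (flag values are assumptions; see get_args for defaults):
#   python task.py --epochs 5 --batch-size 4 --model-name sonar_model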
| apache-2.0 |
SMV818VMS/diffraction | findmotifs.py | 1 | 6730 | #!/usr/bin/env python
#############################################################
#
# findmotifs.py
#
# Author : Miravet-Verde, Samuel
# Written : 04/20/2016
# Last updated : 04/21/2016
#
# Test to detect motifs from sequences given by finddrops
#
#############################################################
#####################
# PACKAGES LOAD #
#####################
import sys, os
import itertools
import numpy as np
import glob
from scipy.cluster.vq import kmeans as scipy_kmeans, vq
from finddrops import find_drops, bruto_drops
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.SeqRecord import SeqRecord
#####################
# GENERAL FUNCTIONS #
#####################
def maxgap(data, max_gap):
"""
Arrange data into groups where successive elements
differ by no more than *maxgap*
The input has to be a list of list with the structure:
[['id1', distance between start decay and last expression],['id2',dist2],...,['idn', distn]]
The output will be a list of lists with the identifiers clustered together by the distances if
the difference between them is less than the maxgap
[[id1, id2],[id3,..]]
Example:
in: [['id1',1], ['id2',-1], ['id3',2], ['id4',80], ['id5',81], ['id3',82]]
out: [['id1','id2','id3'], ['id4','id5','id3']]
"""
# Sort the list by the second value (distances)
data.sort(key=lambda x: x[1])
# Separate lists
identifiers = [x[0] for x in data]
distances = [x[1] for x in data]
# Cluster the distances by max gap and return the identifiers groupes
groups = []
for k, g in itertools.groupby(distances, key=lambda n: n//max_gap):
i = len(list(g))
groups.append(identifiers[:i])
identifiers = identifiers[i:]
return groups
def kmeans(data, k):
"""
Arrange data into k groups based on a kmeans approach
The input has to be a list of list with the structure:
[['id1', distance between start decay and last expression],['id2',dist2],...,['idn', distn]]
The output will be a list of lists with the identifiers clustered together
by running k-means on the distance values:
[[id1, id2],[id3,..]]
"""
# Separate lists
identifiers = [x[0] for x in data]
distances = np.array([x[1] for x in data], dtype=float).reshape(-1, 1)
# Cluster the distances with scipy's k-means and collect the
# identifiers assigned to each of the k centroids
centroids, _ = scipy_kmeans(distances, k)
labels, _ = vq(distances, centroids)
groups = [[] for _ in range(len(centroids))]
for identifier, label in zip(identifiers, labels):
groups[label].append(identifier)
return [group for group in groups if group]
def group_by(data, method, max_gap=10, k=2):
"""
Define the number of groups in your dataset and labels them
Possible methods:
- maxgap
- kmeans
----
max_gap: maximum distance expected between features in maxgap method
k: is the number of groups expected, used by some of the methods
"""
if method == 'maxgap':
return maxgap(data, max_gap)
elif method == 'kmeans':
return kmeans(data, k)
raise ValueError('Unknown grouping method: %s' % method)
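# Toy illustration of the two dispatch paths (values are made up, not taken
# from the datasets used below):
#   group_by([['id1', 1], ['id2', -1], ['id3', 2], ['id4', 80]], 'maxgap', max_gap=10)
#   group_by([['id1', 1], ['id2', -1], ['id3', 2], ['id4', 80]], 'kmeans', k=2)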
def subset_dictionary(bigdict, wanted_keys):
"""
Given a dictionary (bigdict) including several keys and a list of keys
in list format in wanted_keys
Return a subsetted dictionary from bigdict with only wanted keys
"""
return dict((k, bigdict[k]) for k in wanted_keys if k in bigdict)
def process_sequences(dictionary, genome, outFile):
"""
Given a dictionary with {identifier:[features]}
where feature[0] is the start index of a sequence and feature[1] is the end
Creates a multifasta file to run a meme analysis
"""
# The goal is extract several subsequences from a genome given
# we load the genome
with open(outFile, 'w') as f:
for seq_record in SeqIO.parse(genome, 'fasta'):
for k, v in dictionary.items():
start = v[0]
end = v[1]
f.write('>'+k+'_'+str(seq_record.id)+'\n')
f.write(str(seq_record.seq[start:end])+'\n')
def pymeme(directory):
"""
Given a directory,
List all the fasta files in it and runs a MEME for each file
By default, the program creates a directory with the name of the fasta where
all the results will be located
"""
print('finding files...')
files = glob.glob(directory+"*.fasta")
print(str(len(files))+' files found')
for fil in files:
outname = fil.replace(directory,'').replace('.fasta','')
print('meme of '+outname)
command = 'meme '+fil+' -dna -oc ./results_meme/'+outname+'/ -nostatus -time 18000 -maxsize 60000 -mod zoops -nmotifs 5 -minw 6 -maxw 100'
os.system(command)
#####################
# CLASSES #
#####################
#####################
# EXECUTION #
#####################
if __name__ == "__main__":
# Generate the list of candidates
# candidates = find_drops(annotation_file='./datasets/toyset_annotations.txt', expression_file='./datasets/toyset.txt', expression_index=1, header_exp=False)
candidates = bruto_drops(annotation_file='../mycorepo/plusTSSTTS.csv', expression_file='./datasets/dsspilesmpn.txt', additional_id = '_bruto', expression_index=2, expression_determinant=10)
# This is a dictionary, to group we have to select a feature explaining more differences, in this dictionary we have different features:
# 'SIGN7': [6986, 7211, 7001, 7087, 48.566111083270691, -68.0, -4.6440034020500001]
# ident : [start, end,decay_p, 0exp, std expression, max_change, diff exp btw last and 0exp]
# The most specific value for classes is the difference between decay position and 0exp
# Reformat the dictionary
data = []
for key, values in candidates.items():
distance = values[3]-values[2]
data.append([key, distance])
# # Group by
# groups = group_by(data, 'maxgap')
#
# # Extract subsequences and write them in separate files:
# sharp_signals = subset_dictionary(candidates, groups[0])
# decay_signals = subset_dictionary(candidates, groups[1])
#
# process_sequences(sharp_signals, './datasets/toy_genome.fasta', './results_meme/input_sequences/sharp_signals.fasta')
# process_sequences(decay_signals, './datasets/toy_genome.fasta', './results_meme/input_sequences/decay_signals.fasta')
process_sequences(candidates, './datasets/mpn_genome.fasta', './results_meme/input_sequences/bruto_test.fasta')
# Run pymeme
pymeme('./results_meme/input_sequences/')
| mit |
szilveszter/django | django/contrib/gis/geoip/base.py | 67 | 11157 | import os
import re
from ctypes import c_char_p
from django.core.validators import ipv4_re
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_open, GeoIP_delete, GeoIP_database_info,
GeoIP_lib_version, GeoIP_record_by_addr, GeoIP_record_by_name,
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name)
from django.utils import six
from django.utils.encoding import force_bytes
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
#### GeoIP classes ####
class GeoIPException(Exception):
pass
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been
# updated, reload filehandle and/or memory cache. This option
# is not thread safe.
#
# GEOIP_INDEX_CACHE - just cache the most frequently accessed index
# portion of the database, resulting in faster lookups than
# GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
# useful for larger databases such as GeoIP Organization and
# GeoIP City. Note, for GeoIP Country, Region and Netspeed
# databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
# GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
# on Windows).
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
GEOIP_MMAP_CACHE = 8
cache_options = dict((opt, None) for opt in (0, 1, 2, 4, 8))
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initializes the GeoIP object, no parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4, 8) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
settings, respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
if not path:
raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = GeoIP_open(force_bytes(country_db), cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = GeoIP_open(force_bytes(city_db), cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = GeoIP_open(force_bytes(path), cache)
info = GeoIP_database_info(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if GeoIP_delete is None:
return
if self._country:
GeoIP_delete(self._country)
if self._city:
GeoIP_delete(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP only takes bytestrings.
return force_bytes(query)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
if ipv4_re.match(query):
# If an IP address was passed in
return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
else:
# If a FQDN was passed in.
return GeoIP_record_by_name(self._city, c_char_p(enc_query))
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_code_by_addr(self._country, enc_query)
else:
return GeoIP_country_code_by_name(self._country, enc_query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_name_by_addr(self._country, enc_query)
else:
return GeoIP_country_name_by_name(self._country, enc_query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code': self.country_code(query),
'country_name': self.country_name(query),
}
#### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
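"Returns a coordinate tuple for the given query, ordered per `ordering`, or None."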
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
#### GeoIP Database Information Routines ####
@property
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = GeoIP_database_info(self._country)
return ci
@property
def city_info(self):
"Retuns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = GeoIP_database_info(self._city)
return ci
@property
def info(self):
"Returns information about the GeoIP library and databases in use."
info = ''
if GeoIP_lib_version:
info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
#### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
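# A hedged usage sketch (the data-file path is an assumption; GeoIP.dat /
# GeoLiteCity.dat must already be present there):
#
# g = GeoIP(path='/usr/local/share/geoip/')
# g.country('djangoproject.com')  # {'country_code': ..., 'country_name': ...}
# g.city('24.124.1.80')           # dict of city-level fields, or None
# g.lat_lon('djangoproject.com')  # (latitude, longitude) tuple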
| bsd-3-clause |
jdmcbr/geopandas | geopandas/tools/sjoin.py | 1 | 19493 | from typing import Optional
import warnings
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from geopandas import _compat as compat
from geopandas.array import _check_crs, _crs_mismatch_warn
def sjoin(
left_df,
right_df,
how="inner",
predicate="intersects",
lsuffix="left",
rsuffix="right",
**kwargs,
):
"""Spatial join of two GeoDataFrames.
See the User Guide page :doc:`../../user_guide/mergingdata` for details.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
predicate : string, default 'intersects'
Binary predicate. Valid values are determined by the spatial index used.
You can check the valid values in left_df or right_df as
``left_df.sindex.valid_query_predicates`` or
``right_df.sindex.valid_query_predicates``
Replaces deprecated ``op`` parameter.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
Examples
--------
>>> countries = geopandas.read_file(geopandas.datasets.get_\
path("naturalearth_lowres"))
>>> cities = geopandas.read_file(geopandas.datasets.get_path("naturalearth_cities"))
>>> countries.head() # doctest: +SKIP
pop_est continent name \
iso_a3 gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> cities.head()
name geometry
0 Vatican City POINT (12.45339 41.90328)
1 San Marino POINT (12.44177 43.93610)
2 Vaduz POINT (9.51667 47.13372)
3 Luxembourg POINT (6.13000 49.61166)
4 Palikir POINT (158.14997 6.91664)
>>> cities_w_country_data = geopandas.sjoin(cities, countries)
>>> cities_w_country_data.head() # doctest: +SKIP
name_left geometry index_right pop_est continent name_\
right iso_a3 gdp_md_est
0 Vatican City POINT (12.45339 41.90328) 141 62137802 Europe \
Italy ITA 2221000.0
1 San Marino POINT (12.44177 43.93610) 141 62137802 Europe \
Italy ITA 2221000.0
192 Rome POINT (12.48131 41.89790) 141 62137802 Europe \
Italy ITA 2221000.0
2 Vaduz POINT (9.51667 47.13372) 114 8754413 Europe Au\
stria AUT 416600.0
184 Vienna POINT (16.36469 48.20196) 114 8754413 Europe Au\
stria AUT 416600.0
See also
--------
overlay : overlay operation resulting in a new geometry
GeoDataFrame.sjoin : equivalent method
Notes
------
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
if "op" in kwargs:
op = kwargs.pop("op")
deprecation_message = (
"The `op` parameter is deprecated and will be removed"
" in a future release. Please use the `predicate` parameter"
" instead."
)
if predicate != "intersects" and op != predicate:
override_message = (
"A non-default value for `predicate` was passed"
f' (got `predicate="{predicate}"`'
f' in combination with `op="{op}"`).'
" The value of `predicate` will be overriden by the value of `op`,"
" , which may result in unexpected behavior."
f"\n{deprecation_message}"
)
warnings.warn(override_message, UserWarning, stacklevel=4)
else:
warnings.warn(deprecation_message, FutureWarning, stacklevel=4)
predicate = op
if kwargs:
first = next(iter(kwargs.keys()))
raise TypeError(f"sjoin() got an unexpected keyword argument '{first}'")
_basic_checks(left_df, right_df, how, lsuffix, rsuffix)
indices = _geom_predicate_query(left_df, right_df, predicate)
joined = _frame_join(indices, left_df, right_df, how, lsuffix, rsuffix)
return joined
def _basic_checks(left_df, right_df, how, lsuffix, rsuffix):
"""Checks the validity of join input parameters.
`how` must be one of the valid options.
`'index_'` concatenated with `lsuffix` or `rsuffix` must not already
exist as columns in the left or right data frames.
Parameters
------------
left_df : GeoDataFrame
right_df : GeoDataFrame
how : str, one of 'left', 'right', 'inner'
join type
lsuffix : str
left index suffix
rsuffix : str
right index suffix
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "{}" but is expected to be in {}'.format(how, allowed_hows)
)
if not _check_crs(left_df, right_df):
_crs_mismatch_warn(left_df, right_df, stacklevel=4)
index_left = "index_{}".format(lsuffix)
index_right = "index_{}".format(rsuffix)
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being"
" joined".format(index_left, index_right)
)
def _geom_predicate_query(left_df, right_df, predicate):
"""Compute geometric comparisons and get matching indices.
Parameters
----------
left_df : GeoDataFrame
right_df : GeoDataFrame
predicate : string
Binary predicate to query.
Returns
-------
DataFrame
DataFrame with matching indices in
columns named `_key_left` and `_key_right`.
"""
with warnings.catch_warnings():
# We don't need to show our own warning here
# TODO remove this once the deprecation has been enforced
warnings.filterwarnings(
"ignore", "Generated spatial index is empty", FutureWarning
)
original_predicate = predicate
if predicate == "within":
# within is implemented as the inverse of contains
# contains is a faster predicate
# see discussion at https://github.com/geopandas/geopandas/pull/1421
predicate = "contains"
sindex = left_df.sindex
input_geoms = right_df.geometry
else:
# all other predicates are symmetric
# keep them the same
sindex = right_df.sindex
input_geoms = left_df.geometry
if sindex:
l_idx, r_idx = sindex.query_bulk(input_geoms, predicate=predicate, sort=False)
indices = pd.DataFrame({"_key_left": l_idx, "_key_right": r_idx})
else:
# when sindex is empty / has no valid geometries
indices = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
if original_predicate == "within":
# within is implemented as the inverse of contains
# flip back the results
indices = indices.rename(
columns={"_key_left": "_key_right", "_key_right": "_key_left"}
)
return indices
def _frame_join(join_df, left_df, right_df, how, lsuffix, rsuffix):
"""Join the GeoDataFrames at the DataFrame level.
Parameters
----------
join_df : DataFrame
Indices and join data returned by the geometric join.
Must have columns `_key_left` and `_key_right`
with integer indices representing the matches
from `left_df` and `right_df` respectively.
Additional columns may be included and will be copied to
the resultant GeoDataFrame.
left_df : GeoDataFrame
right_df : GeoDataFrame
lsuffix : string
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string
Suffix to apply to overlapping column names (right GeoDataFrame).
how : string
The type of join to use on the DataFrame level.
Returns
-------
GeoDataFrame
Joined GeoDataFrame.
"""
# the spatial index only allows limited (numeric) index types, but an
# index in geopandas may be any arbitrary dtype. so reset both indices now
# and store references to the original indices, to be reaffixed later.
# GH 352
index_left = "index_{}".format(lsuffix)
left_df = left_df.copy(deep=True)
try:
left_index_name = left_df.index.name
left_df.index = left_df.index.rename(index_left)
except TypeError:
index_left = [
"index_{}".format(lsuffix + str(pos))
for pos, ix in enumerate(left_df.index.names)
]
left_index_name = left_df.index.names
left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
index_right = "index_{}".format(rsuffix)
right_df = right_df.copy(deep=True)
try:
right_index_name = right_df.index.name
right_df.index = right_df.index.rename(index_right)
except TypeError:
index_right = [
"index_{}".format(rsuffix + str(pos))
for pos, ix in enumerate(right_df.index.names)
]
right_index_name = right_df.index.names
right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
# perform join on the dataframes
if how == "inner":
join_df = join_df.set_index("_key_left")
joined = (
left_df.merge(join_df, left_index=True, right_index=True)
.merge(
right_df.drop(right_df.geometry.name, axis=1),
left_on="_key_right",
right_index=True,
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
elif how == "left":
join_df = join_df.set_index("_key_left")
joined = (
left_df.merge(join_df, left_index=True, right_index=True, how="left")
.merge(
right_df.drop(right_df.geometry.name, axis=1),
how="left",
left_on="_key_right",
right_index=True,
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
else: # how == 'right':
joined = (
left_df.drop(left_df.geometry.name, axis=1)
.merge(
join_df.merge(
right_df, left_on="_key_right", right_index=True, how="right"
),
left_index=True,
right_on="_key_left",
how="right",
suffixes=("_{}".format(lsuffix), "_{}".format(rsuffix)),
)
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
if isinstance(index_right, list):
joined.index.names = right_index_name
else:
joined.index.name = right_index_name
return joined
def _nearest_query(
left_df: GeoDataFrame,
right_df: GeoDataFrame,
max_distance: float,
how: str,
return_distance: bool,
):
if not (compat.PYGEOS_GE_010 and compat.USE_PYGEOS):
raise NotImplementedError(
"Currently, only PyGEOS >= 0.10.0 supports `nearest_all`. "
+ compat.INSTALL_PYGEOS_ERROR
)
# use the opposite of the join direction for the index
use_left_as_sindex = how == "right"
if use_left_as_sindex:
sindex = left_df.sindex
query = right_df.geometry
else:
sindex = right_df.sindex
query = left_df.geometry
if sindex:
res = sindex.nearest(
query,
return_all=True,
max_distance=max_distance,
return_distance=return_distance,
)
if return_distance:
(input_idx, tree_idx), distances = res
else:
(input_idx, tree_idx) = res
distances = None
if use_left_as_sindex:
l_idx, r_idx = tree_idx, input_idx
sort_order = np.argsort(l_idx, kind="stable")
l_idx, r_idx = l_idx[sort_order], r_idx[sort_order]
if distances is not None:
distances = distances[sort_order]
else:
l_idx, r_idx = input_idx, tree_idx
join_df = pd.DataFrame(
{"_key_left": l_idx, "_key_right": r_idx, "distances": distances}
)
else:
# when sindex is empty / has no valid geometries
join_df = pd.DataFrame(
columns=["_key_left", "_key_right", "distances"], dtype=float
)
return join_df
def sjoin_nearest(
left_df: GeoDataFrame,
right_df: GeoDataFrame,
how: str = "inner",
max_distance: Optional[float] = None,
lsuffix: str = "left",
rsuffix: str = "right",
distance_col: Optional[str] = None,
) -> GeoDataFrame:
"""Spatial join of two GeoDataFrames based on the distance between their geometries.
Results will include multiple output records for a single input record
where there are multiple equidistant nearest or intersected neighbors.
See the User Guide page
https://geopandas.readthedocs.io/en/latest/docs/user_guide/mergingdata.html
for more details.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
max_distance : float, default None
Maximum distance within which to query for nearest geometry.
Must be greater than 0.
The max_distance used to search for nearest items in the tree may have a
significant impact on performance by reducing the number of input
geometries that are evaluated for nearest items in the tree.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
distance_col : string, default None
If set, save the distances computed between matching geometries under a
column of this name in the joined GeoDataFrame.
Examples
--------
>>> countries = geopandas.read_file(geopandas.datasets.get_\
path("naturalearth_lowres"))
>>> cities = geopandas.read_file(geopandas.datasets.get_path("naturalearth_cities"))
>>> countries.head(2).name # doctest: +SKIP
pop_est continent name \
iso_a3 gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
>>> cities.head(2).name # doctest: +SKIP
name geometry
0 Vatican City POINT (12.45339 41.90328)
1 San Marino POINT (12.44177 43.93610)
>>> cities_w_country_data = geopandas.sjoin_nearest(cities, countries)
>>> cities_w_country_data[['name_left', 'name_right']].head(2) # doctest: +SKIP
name_left geometry index_right pop_est continent name_\
right iso_a3 gdp_md_est
0 Vatican City POINT (12.45339 41.90328) 141 62137802 Europe \
Italy ITA 2221000.0
1 San Marino POINT (12.44177 43.93610) 141 62137802 Europe \
Italy ITA 2221000.0
To include the distances:
>>> cities_w_country_data = geopandas.sjoin_nearest\
(cities, countries, distance_col="distances")
>>> cities_w_country_data[["name_left", "name_right", \
"distances"]].head(2) # doctest: +SKIP
name_left name_right distances
0 Vatican City Italy 0.0
1 San Marino Italy 0.0
In the following example, we get multiple cities for Italy because all results are
equidistant (in this case zero because they intersect).
In fact, we get 3 results in total:
>>> countries_w_city_data = geopandas.sjoin_nearest\
(cities, countries, distance_col="distances", how="right")
>>> italy_results = \
countries_w_city_data[countries_w_city_data["name_left"] == "Italy"]
>>> italy_results # doctest: +SKIP
name_x name_y
141 Vatican City Italy
141 San Marino Italy
141 Rome Italy
See also
--------
sjoin : binary predicate joins
GeoDataFrame.sjoin_nearest : equivalent method
Notes
-----
Since this join relies on distances, results will be inaccurate
if your geometries are in a geographic CRS.
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
_basic_checks(left_df, right_df, how, lsuffix, rsuffix)
left_df.geometry.values.check_geographic_crs(stacklevel=1)
right_df.geometry.values.check_geographic_crs(stacklevel=1)
return_distance = distance_col is not None
join_df = _nearest_query(left_df, right_df, max_distance, how, return_distance)
if return_distance:
join_df = join_df.rename(columns={"distances": distance_col})
else:
join_df.pop("distances")
joined = _frame_join(join_df, left_df, right_df, how, lsuffix, rsuffix)
if return_distance:
columns = [c for c in joined.columns if c != distance_col] + [distance_col]
joined = joined[columns]
return joined
| bsd-3-clause |
theoryno3/scikit-learn | examples/cluster/plot_mean_shift.py | 348 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
snap-stanford/ogb | ogb/linkproppred/dataset_dgl.py | 1 | 7161 | import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from dgl.data.utils import load_graphs, save_graphs, Subset
import dgl
from ogb.utils.url import decide_download, download_url, extract_zip
from ogb.io.read_graph_dgl import read_graph_dgl, read_heterograph_dgl
from ogb.utils.torch_util import replace_numpy_with_torchtensor
class DglLinkPropPredDataset(object):
'''Adapted from https://docs.dgl.ai/en/latest/_modules/dgl/data/chem/csv_dataset.html#CSVDataset'''
def __init__(self, name, root = 'dataset', meta_dict=None):
'''
- name (str): name of the dataset
- root (str): root directory to store the dataset folder
- meta_dict: dictionary that stores all the meta-information about data. Default is None,
but when something is passed, it uses its information. Useful for debugging for external contributers.
'''
self.name = name ## original name, e.g., ogbl-ppa
if meta_dict is None:
self.dir_name = '_'.join(name.split('-'))
# check if previously-downloaded folder exists.
# If so, use that one.
if osp.exists(osp.join(root, self.dir_name + '_dgl')):
self.dir_name = self.dir_name + '_dgl'
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
if self.name not in master:
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
# check version
# First check whether the dataset has been already downloaded or not.
# If so, check whether the dataset version is the newest or not.
# If the dataset is not the newest version, notify this to the user.
if osp.isdir(self.root) and (not osp.exists(osp.join(self.root, 'RELEASE_v' + str(self.meta_info['version']) + '.txt'))):
print(self.name + ' has been updated.')
if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
shutil.rmtree(self.root)
self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., ppassoc
self.task_type = self.meta_info['task type']
self.eval_metric = self.meta_info['eval metric']
self.is_hetero = self.meta_info['is hetero'] == 'True'
self.binary = self.meta_info['binary'] == 'True'
super(DglLinkPropPredDataset, self).__init__()
self.pre_process()
def pre_process(self):
processed_dir = osp.join(self.root, 'processed')
pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')
if osp.exists(pre_processed_file_path):
self.graph, _ = load_graphs(pre_processed_file_path)
else:
### check if the downloaded file exists
if self.binary:
# npz format
has_necessary_file_simple = osp.exists(osp.join(self.root, 'raw', 'data.npz')) and (not self.is_hetero)
has_necessary_file_hetero = osp.exists(osp.join(self.root, 'raw', 'edge_index_dict.npz')) and self.is_hetero
else:
# csv file
has_necessary_file_simple = osp.exists(osp.join(self.root, 'raw', 'edge.csv.gz')) and (not self.is_hetero)
has_necessary_file_hetero = osp.exists(osp.join(self.root, 'raw', 'triplet-type-list.csv.gz')) and self.is_hetero
has_necessary_file = has_necessary_file_simple or has_necessary_file_hetero
if not has_necessary_file:
url = self.meta_info['url']
if decide_download(url):
path = download_url(url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
# delete the folder if it already exists
try:
shutil.rmtree(self.root)
except OSError:
pass
shutil.move(osp.join(self.original_root, self.download_name), self.root)
else:
print('Stop download.')
exit(-1)
raw_dir = osp.join(self.root, 'raw')
add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
### pre-process and save
if self.meta_info['additional node files'] == 'None':
additional_node_files = []
else:
additional_node_files = self.meta_info['additional node files'].split(',')
if self.meta_info['additional edge files'] == 'None':
additional_edge_files = []
else:
additional_edge_files = self.meta_info['additional edge files'].split(',')
if self.is_hetero:
graph = read_heterograph_dgl(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)[0]
else:
graph = read_graph_dgl(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)[0]
print('Saving...')
save_graphs(pre_processed_file_path, graph, {})
self.graph, _ = load_graphs(pre_processed_file_path)
def get_edge_split(self, split_type = None):
if split_type is None:
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
# short-cut if split_dict.pt exists
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'train.pt')))
valid = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'valid.pt')))
test = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'test.pt')))
return {'train': train, 'valid': valid, 'test': test}
def __getitem__(self, idx):
assert idx == 0, 'This dataset has only one graph'
return self.graph[0]
def __len__(self):
return 1
def __repr__(self): # pragma: no cover
return '{}({})'.format(self.__class__.__name__, len(self))
if __name__ == '__main__':
dgl_dataset = DglLinkPropPredDataset(name = 'ogbl-collab')
split_edge = dgl_dataset.get_edge_split()
print(dgl_dataset[0])
print(split_edge['train'].keys())
print(split_edge['valid'].keys())
| mit |
astocko/statsmodels | statsmodels/tsa/base/tests/test_base.py | 27 | 2106 | import numpy as np
from pandas import Series
from pandas import date_range
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
def test_pandas_nodates_index():
from statsmodels.datasets import sunspots
y = sunspots.load_pandas().data.SUNACTIVITY
npt.assert_raises(ValueError, TimeSeriesModel, y)
def test_predict_freq():
# test that predicted dates have same frequency
x = np.arange(1,36.)
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
#npt.assert_(model.data.freq == "AS-APR")
npt.assert_(model.data.freq == "A-APR")
start = model._get_predict_start("2006-4-30")
end = model._get_predict_end("2016-4-30")
model._make_predict_dates()
predict_dates = model.data.predict_dates
#expected_dates = date_range("2006-12-31", "2016-12-31",
# freq="AS-APR")
expected_dates = date_range("2006-4-30", "2016-4-30", freq="A-APR")
assert_equal(predict_dates, expected_dates)
#ptesting.assert_series_equal(predict_dates, expected_dates)
def test_keyerror_start_date():
x = np.arange(1,36.)
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_raises(ValueError, model._get_predict_start, "1970-4-30")
def test_period_index():
# test 1285
from pandas import PeriodIndex, TimeSeries
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
x = np.arange(1, 21.)
model = TimeSeriesModel(Series(x, index=dates))
npt.assert_(model.data.freq == "M")
model = TimeSeriesModel(TimeSeries(x, index=dates))
npt.assert_(model.data.freq == "M")
| bsd-3-clause |
ray-project/ray | rllib/examples/inference_and_serving/policy_inference_after_training.py | 1 | 3765 | """
Example showing how you can use your trained policy for inference
(computing actions) in an environment.
Includes options for LSTM-based models (--use-lstm), attention-net models
(--use-attention), and plain (non-recurrent) models.
"""
import argparse
import gym
import os
import ray
from ray import air, tune
from ray.rllib.algorithms.algorithm import Algorithm
from ray.tune.registry import get_trainable_cls
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "torch"],
default="tf",
help="The DL framework specifier.",
)
parser.add_argument("--eager-tracing", action="store_true")
parser.add_argument(
"--stop-iters",
type=int,
default=200,
help="Number of iterations to train before we do inference.",
)
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train before we do inference.",
)
parser.add_argument(
"--stop-reward",
type=float,
default=150.0,
help="Reward at which we stop training before we do inference.",
)
parser.add_argument(
"--explore-during-inference",
action="store_true",
help="Whether the trained policy should use exploration during action "
"inference.",
)
parser.add_argument(
"--num-episodes-during-inference",
type=int,
default=10,
help="Number of episodes to do inference over after training.",
)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
config = (
get_trainable_cls(args.run)
.get_default_config()
.environment("FrozenLake-v1")
# Run with tracing enabled for tf2?
.framework(args.framework, eager_tracing=args.eager_tracing)
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
print("Training policy until desired reward/timesteps/iterations. ...")
tuner = tune.Tuner(
args.run,
param_space=config.to_dict(),
run_config=air.RunConfig(
stop=stop,
verbose=2,
checkpoint_config=air.CheckpointConfig(
checkpoint_frequency=1, checkpoint_at_end=True
),
),
)
results = tuner.fit()
print("Training completed. Restoring new Trainer for action inference.")
# Get the last checkpoint from the above training run.
checkpoint = results.get_best_result().checkpoint
# Create new Algorithm and restore its state from the last checkpoint.
algo = Algorithm.from_checkpoint(checkpoint)
# Create the env to do inference in.
env = gym.make("FrozenLake-v1")
obs = env.reset()
num_episodes = 0
episode_reward = 0.0
while num_episodes < args.num_episodes_during_inference:
# Compute an action (`a`).
a = algo.compute_single_action(
observation=obs,
explore=args.explore_during_inference,
policy_id="default_policy", # <- default value
)
# Send the computed action `a` to the env.
obs, reward, done, _ = env.step(a)
episode_reward += reward
# Is the episode `done`? -> Reset.
if done:
print(f"Episode done: Total reward = {episode_reward}")
obs = env.reset()
num_episodes += 1
episode_reward = 0.0
algo.stop()
ray.shutdown()
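# Example invocation (flag values are assumptions; see the parser above for defaults):
#   python policy_inference_after_training.py --run PPO --stop-iters 3 --explore-during-inference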
| apache-2.0 |
previtus/MGR-Project-Code | DatasetHandler/DataAugmentation.py | 1 | 10236 | from Omnipresent import len_
import os
import shutil
from DatasetHandler.FileHelperFunc import copy_folder, copy_file, file_exists, folder_exists, md5
def handle_noncanon_dataset(Settings, model_settings):
'''
Special case scenario.
We are creating a new custom dataset, instead of using one of the big officially used, "canon" datasets
:param Settings: Setting for the whole experiment
:param model_settings: Setting for our one dataset
:return:
'''
if model_settings["noncanon_dataset"] == 'expand_existing_dataset':
# Idea: take an existing dataset and expand it via
# Directly load the old segments file
# for each segment
# for each image
# apply the custom ImageDataGenerator to generate new images (depending of settings)
# save the new images into target folder as well as into this Segment
# save edited Segments array into new SegmentsFile.dump
debug_visual_output = False
debug_txt_output = False
from DatasetHandler.CreateDataset import get_path_for_dataset
from Downloader.DataOperations import LoadDataFile
from Downloader.KerasPreparation import LoadActualImages
import numpy as np
if debug_visual_output:
from matplotlib import pyplot
from keras.preprocessing.image import array_to_img
import math
target_folder = model_settings["dataset_name"]
source_folder = model_settings["source_dataset"]
filename_override = model_settings["dump_file_override"]
source_segments_path = get_path_for_dataset(source_folder, filename_override)
source_segments_dir = os.path.dirname(source_segments_path) + '/'
if not file_exists(source_segments_dir + filename_override):
print "WARNING !!!!"
print '\t',"Careful, couldn't find the file", source_segments_dir + filename_override
print '\t',"... we will instead be using ", source_segments_path
target_segments_path = get_path_for_dataset(target_folder, '')
target_segments_dir = os.path.dirname(target_segments_path) + '/'
target_segments_path = target_segments_dir+model_settings["dump_file_expanded"]
# Check if we don't alredy have it?
if (file_exists(target_segments_path) and folder_exists(target_segments_dir+'images')):
list1 = os.listdir(target_segments_dir+'images')
list2 = os.listdir(source_segments_dir+'images')
if len(list1)>=len(list2):
# Seems like we have copied it correctly too
print "We already have this dataset extended! (", len(list1), len(list2), ")"
return
else:
print '\t', file_exists(target_segments_path), target_segments_path
print '\t', folder_exists(target_segments_dir+'images'), target_segments_dir+'images'
generated_images_folder = os.path.dirname(target_segments_path) + '/images/'
print "source_segments_path", source_segments_path # /home/ekmek/Vitek/MGR-Project-Code/Data/StreetViewData/miniset_640px/SegmentsData.dump
print "source_segments_dir", source_segments_dir # /home/ekmek/Vitek/MGR-Project-Code/Data/StreetViewData/miniset_640px/
print "target_segments_path", target_segments_path # /home/ekmek/Vitek/MGR-Project-Code/Data/StreetViewData/miniset_640px_expanded/SegmentsData_images_generated_test_folder_expanded.dump
print "target_segments_dir", target_segments_dir # /home/ekmek/Vitek/MGR-Project-Code/Data/StreetViewData/miniset_640px_expanded/
# copy source_dataset -> target_dataset in dataset_name
# from source_segments_dir/images to target_segments_dir/images
source__path = source_segments_dir+'images'
target__path = target_segments_dir+'images'
copy_folder(source__path, target__path)
# test the success of this copy process!
# for each file in source_segments_dir/images check for a copy in target_segments_dir/images
was_ok = False
while not was_ok:
was_ok = True
list_of_source_files=os.listdir(source__path)
for item in list_of_source_files:
file_source = source__path + '/' + item
file_target = target__path + '/' + item
if not file_exists(file_target):
was_ok = False
else:
# file exists, but maybe just to be parainoid secure, md5 compare?
md5_1 = md5(file_source)
md5_2 = md5(file_target)
if md5_1 != md5_2:
was_ok = False
if not was_ok:
copy_file(file_source, file_target)
print '-- was missing, now fixed:' + file_source
size_of_batch = model_settings["noncanon_dataset_genfrom1"]
image_generator = model_settings["noncanon_dataset_imagegenerator"]
print "image_generator", image_generator
Segments = LoadDataFile(source_segments_path)
number_of_images_parsed = 0
for Segment in Segments:
number_of_images = Segment.number_of_images
for i_th_image in range(0,number_of_images):
if Segment.hasLoadedImageI(i_th_image):
filename = source_segments_dir+Segment.getImageFilename(i_th_image)
number_of_images_parsed += 1
print filename
# we have one image filepath - generate data
x = LoadActualImages([filename])
y = np.array([Segment.SegmentId])
if debug_txt_output:
print "ORIGINAL id", y, "ith", i_th_image, "img:", len_(x[0])
X_batch = []
y_batch = []
from DatasetHandler.custom_image import ImageDataGenerator as custom_ImageDataGenerator
number_of_images_generated = 0
for x_gen, y_gen in image_generator.flow(x, y, batch_size=1, save_to_dir=generated_images_folder, save_prefix=str(y)+'_', save_format='jpg'):
number_of_images_generated += 1
image = x_gen[0]
filename_generated = y_gen[1][0]
id = y_gen[0][0]
if debug_txt_output:
print id, filename_generated
# save image on path filename_generated to the Segments hierarchy!
print "Segment.number_of_images", Segment.number_of_images
print "Segment.LocationsIndex", Segment.LocationsIndex
print "Segment.DistinctLocations", Segment.DistinctLocations
print "Segment.DistinctNearbyVector", Segment.DistinctNearbyVector
print "Segment.HasLoadedImages", Segment.HasLoadedImages
print "Segment.ErrorMessages", Segment.ErrorMessages
# The +1000 offset marks location indices that belong to generated images
location_index = Segment.LocationsIndex[i_th_image] + 1000
# accordingly we get Segment.DistinctLocations[location_index] and Segment.DistinctNearbyVector[location_index]
has_img = Segment.HasLoadedImages[i_th_image]
has_err = Segment.ErrorMessages[i_th_image]
# Add to this Segment
Segment.number_of_images += 1
Segment.LocationsIndex.append(location_index)
Segment.HasLoadedImages.append(has_img)
Segment.ErrorMessages.append(has_err)
# Change filename and path
new_filename_generated = target_segments_dir + 'images' + Segment.getImageFilename(Segment.number_of_images-1)[6:]
if debug_txt_output:
print "rename", filename_generated, "to", new_filename_generated
shutil.move(filename_generated, new_filename_generated)
print ".", new_filename_generated
X_batch.append(image)
y_batch.append(id)
#print "id", y_gen, "img:", len_(x_gen), array_md5(image)
if len(X_batch) == size_of_batch:
if debug_txt_output:
print "GENERATED ", len(y_batch), " images > ", len_(X_batch), y_batch
if debug_visual_output:
                # create a near-square grid of the generated images
size_for_plot = int(math.floor(math.sqrt(size_of_batch-0.1))+1)
size_for_plot_y = size_for_plot
while size_of_batch <= size_for_plot*(size_for_plot_y-1):
size_for_plot_y -= 1
print size_for_plot, "x", size_for_plot_y, " grid"
for i in range(0, len(X_batch)):
pyplot.subplot(size_for_plot_y,size_for_plot,i+1)
img = X_batch[i]
backimg = array_to_img(img)
pyplot.imshow(backimg)
# show the plot
pyplot.show()
break
break # end generation for this one image
if debug_txt_output:
print "Save new images from id", y, " in", len_(X_batch)
print "number_of_images_parsed", number_of_images_parsed
from Downloader.DataOperations import SaveDataFile
print "Saving new Segments file into ", target_segments_path
SaveDataFile(target_segments_path, Segments)
else:
print "This type of noncanon dataset generation has not yet been implemented!"
| mit |
astocko/statsmodels | statsmodels/datasets/copper/data.py | 28 | 2316 | """World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example, in Gill, the outcome variable (of a 2 stage estimation) is the world
consumption of copper (in 1000 metric tons) for the 25 years. The explanatory
variables are the constant dollar adjusted price of copper, the price of a
substitute, aluminum, an index of real per capita income (base 1970), an
annual measure of manufacturer inventory change, and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/copper.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
def load_pandas():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
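# Example usage (a sketch; assumes statsmodels is installed so this module is
# importable as statsmodels.datasets.copper):
#
#   from statsmodels.datasets import copper
#   data = copper.load_pandas()
#   data.endog.head()   # WORLDCONSUMPTION, the outcome variable
#   data.exog.head()    # the five explanatory variables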
| bsd-3-clause |
theoryno3/scikit-learn | examples/decomposition/plot_incremental_pca.py | 243 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
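# For data that truly does not fit in memory, IncrementalPCA can also be fed
# chunk by chunk via partial_fit instead of fit_transform (a sketch using the
# same iris data, split into batches only for illustration):
#
#   ipca_stream = IncrementalPCA(n_components=n_components)
#   for chunk in np.array_split(X, 15):
#       ipca_stream.partial_fit(chunk)
#   X_stream = ipca_stream.transform(X)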
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
exclude/monki | monki/settings.py | 1 | 6246 | from pathlib import Path
from django.utils import crypto
from decouple import config, Csv
from dj_database_url import parse as db_url
BASE_DIR = Path(__file__).absolute().parent
ADMINS = (
('include', 'contato@xchan.pw'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default=crypto.get_random_string(32))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool, default=False)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv(), default='*')
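# The three settings above are read from the environment by python-decouple;
# a matching .env file might look like this (an illustrative sketch, not part
# of the repository):
#
#   SECRET_KEY=change-me-in-production
#   DEBUG=True
#   ALLOWED_HOSTS=localhost,127.0.0.1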
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd
'compressor',
'debug_toolbar',
'django_cleanup',
'django_extensions',
'imagekit',
'rest_framework',
# project
'monki.boards',
'monki.core',
'monki.home',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'monki.boards.middleware.UserCookieMiddleware',
'monki.boards.middleware.UserBanMiddleware',
]
ROOT_URLCONF = 'monki.urls'
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(BASE_DIR / 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'monki.boards.context_processors.inject_categories',
],
},
},
]
WSGI_APPLICATION = 'monki.wsgi.application'
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'monki.core.middleware.show_toolbar',
'JQUERY_URL': None,
}
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {'default': config('DATABASE_URL', cast=db_url, default='sqlite:///db.sqlite3')}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
STATICFILES_DIRS = [
str(BASE_DIR / 'static'),
str(BASE_DIR.parent / 'node_modules'),
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
STATIC_URL = '/static/'
STATIC_ROOT = str(BASE_DIR.parent / 'static-root')
MEDIA_URL = '/media/'
MEDIA_ROOT = str(BASE_DIR.parent / 'media-root')
# Django-compressor
# https://django-compressor.readthedocs.org/en/latest/
COMPRESS_PRECOMPILERS = [
('text/x-sass', 'django_libsass.SassCompiler'),
('text/x-scss', 'django_libsass.SassCompiler'),
]
COMPRESS_ENABLED = not DEBUG
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.rCSSMinFilter',
]
COMPRESS_JS_FILTERS = [
'compressor.filters.jsmin.JSMinFilter',
]
# Django-libsass
# https://github.com/torchbox/django-libsass
LIBSASS_SOURCEMAPS = True
# Django-imagekit
# https://django-imagekit.readthedocs.io
if not DEBUG:
IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = 'imagekit.cachefiles.strategies.Optimistic'
# E-mail
# https://docs.djangoproject.com/en/1.9/topics/email/
DEFAULT_FROM_EMAIL = 'no-reply@xchan.pw'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'no-reply@xchan.pw'
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='123change')
EMAIL_PORT = 587
EMAIL_SUBJECT_PREFIX = '[XCHAN] '
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# Logging
# https://docs.djangoproject.com/en/1.9/topics/logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# File upload
# https://docs.djangoproject.com/en/1.9/topics/http/file-uploads/
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# Django Rest Framework
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
'PAGE_SIZE': 10,
}
| agpl-3.0 |
theoryno3/scikit-learn | sklearn/utils/random.py | 232 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
            raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
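# Example (a sketch): draw one sparse column of labels {0, 1, 2} with skewed
# class probabilities; roughly half of the 10 entries stay (implicit) zeros.
#
#   m = random_choice_csc(n_samples=10,
#                         classes=[np.array([0, 1, 2])],
#                         class_probability=[np.array([0.5, 0.3, 0.2])],
#                         random_state=0)
#   m.toarray().ravel()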
| bsd-3-clause |
pianomania/scikit-learn | examples/cluster/plot_kmeans_digits.py | 42 | 4491 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each mesh point.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
tszym/ansible | lib/ansible/modules/storage/zfs/zfs_facts.py | 8 | 8180 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
- Gather facts from ZFS dataset properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS dataset name.
aliases: [ "ds", "dataset" ]
required: yes
recurse:
description:
- Specifies if properties for any children should be recursively
displayed.
type: bool
default: False
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zfs(1M) man page.
aliases: [ "props" ]
default: all
required: false
type:
description:
        - Specifies which dataset types to display. Multiple values have to be
          provided in comma-separated form.
default: all
choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
required: false
depth:
description:
        - Specifies recursion depth.
    default: 0
required: false
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
zfs_facts:
dataset: rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
zfs_facts:
name: data/home
recurse: yes
type: filesystem
- debug:
msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
with_items: '{{ ansible_zfs_datasets }}'
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: string
sample: rpool/var/spool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
recurse:
description: if we should recurse over ZFS dataset
returned: if 'recurse' is set to True
type: boolean
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: string
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.recurse = module.params['recurse']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self.type = module.params['type']
self.depth = module.params['depth']
self._datasets = defaultdict(dict)
self.facts = []
def dataset_exists(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
if self.recurse:
cmd.append('-r')
if int(self.depth) != 0:
cmd.append('-d')
cmd.append('%s' % self.depth)
if self.type:
cmd.append('-t')
cmd.append(self.type)
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
cmd.append(self.name)
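        # e.g. with name='rpool', parsable=True and type='filesystem', the
        # assembled command is roughly:
        #   zfs get -H -p -t filesystem -o name,property,value all rpool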
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
dataset, property, value = line.split('\t')
self._datasets[dataset].update({property: value})
for k, v in iteritems(self._datasets):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_datasets': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
stderr=err,
rc=rc)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
recurse=dict(required=False, default=False, type='bool'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
depth=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
zfs_facts = ZFSFacts(module)
result = {}
result['changed'] = False
result['name'] = zfs_facts.name
if zfs_facts.parsable:
result['parsable'] = zfs_facts.parsable
if zfs_facts.recurse:
result['recurse'] = zfs_facts.recurse
if zfs_facts.dataset_exists():
result['ansible_facts'] = zfs_facts.get_facts()
else:
module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ray-project/ray | rllib/examples/custom_vector_env.py | 1 | 2237 | import argparse
import os
import ray
from ray import air, tune
from ray.rllib.examples.env.mock_env import MockVectorEnv
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.tune.registry import get_trainable_cls
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "torch"],
default="tf",
help="The DL framework specifier.",
)
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
"--stop-iters", type=int, default=50, help="Number of iterations to train."
)
parser.add_argument(
"--stop-timesteps", type=int, default=100000, help="Number of timesteps to train."
)
parser.add_argument(
"--stop-reward", type=float, default=35.0, help="Reward at which we stop training."
)
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
# episode-len=100
# num-envs=4 (note that these are fake-envs as the MockVectorEnv only
# carries a single CartPole sub-env in it).
tune.register_env("custom_vec_env", lambda env_ctx: MockVectorEnv(100, 4))
config = (
get_trainable_cls(args.run)
.get_default_config()
.environment("custom_vec_env")
.framework(args.framework)
.rollouts(num_rollout_workers=2)
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
tuner = tune.Tuner(
args.run,
param_space=config.to_dict(),
run_config=air.RunConfig(stop=stop, verbose=1),
)
results = tuner.fit()
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
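# Example invocation (a sketch; the flags match the argparse options defined
# above):
#
#   python custom_vector_env.py --run PPO --framework torch --stop-iters 5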
| apache-2.0 |
pianomania/scikit-learn | sklearn/grid_search.py | 16 | 40213 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
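# Minimal usage sketch (assumes X, y and integer index arrays train/test are
# already defined; the estimator and scoring choice are illustrative):
#
#   from sklearn.svm import SVC
#   est = SVC()
#   scorer = check_scoring(est, scoring='accuracy')   # imported above
#   score, params, n_test = fit_grid_point(X, y, est, {'C': 1.0}, train,
#                                          test, scorer, verbose=0)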
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct, getting rid of the per-instance __dict__; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would reintroduce the __dict__ on each instance, so we tell the Python
    # interpreter that this subclass uses static __slots__ instead of dynamic
    # attributes. Since we don't need any additional slot in the subclass,
    # we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: score, n_test_samples, scoring time,
        # parameters
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
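            # e.g. with iid=True, folds of sizes 60/40 and fold scores 0.9/0.8
            # combine to (0.9*60 + 0.8*40) / 100 = 0.86 rather than the
            # unweighted mean 0.85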
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample
        rather than the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
    -----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
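# A minimal sketch (hypothetical settings) of the list-of-dicts form of
# ``param_grid`` documented above: each dict spans its own grid, so invalid
# combinations such as a ``gamma`` for a linear kernel are never searched.
_EXAMPLE_PARAM_GRID = [
    {'kernel': ['linear'], 'C': [1, 10, 100]},
    {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [1e-3, 1e-4]},
]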
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default=1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample
        rather than the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
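# A minimal usage sketch (helper name hypothetical, assuming scipy is
# installed): ``expon`` exposes the ``rvs`` method required of entries in
# ``param_distributions``, so ``C`` is sampled with replacement while
# ``kernel`` is drawn uniformly from the list.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    search = RandomizedSearchCV(
        SVC(), {'C': expon(scale=10), 'kernel': ['linear', 'rbf']},
        n_iter=5, random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_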
| bsd-3-clause |
pianomania/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 51 | 12300 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
    # np.median and np.ma.median (see the short sketch after this test)
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
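# A short sketch (illustrative only, not part of the test suite) of the
# NaN-median ambiguity noted above: plain np.median propagates NaN while the
# masked variant ignores it, so the two disagree on the same column.
def _demo_nan_median():
    arr = np.array([1., np.nan, 3.])
    # np.median(arr) -> nan, masked median -> 2.0
    return np.median(arr), np.ma.median(np.ma.masked_invalid(arr))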
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
            if test_missing_values == 0:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent value as promised in its doc, but the lowest of the most
    # frequent ones. If this test starts failing after a scipy update,
    # Imputer will need to be updated to be consistent with the new
    # (correct) behaviour (see the small check after this test)
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
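# A minimal check (illustrative, helper name made up) of the scipy.stats.mode
# tie-breaking relied on above: with the counts tied, the lowest value wins
# rather than the first one encountered.
def _demo_scipy_mode_tie():
    from scipy import stats
    mode, _ = stats.mode([3, 3, 1, 1, 2])
    # mode is array([1]): the lowest of the tied values 1 and 3
    return mode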
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
cainiaocome/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 352 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al. (2009).
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
ray-project/ray | python/ray/data/tests/test_mars.py | 1 | 1480 | import pytest
import ray
import mars
import mars.dataframe as md
import pyarrow as pa
@pytest.fixture(scope="module")
def ray_start_regular(request): # pragma: no cover
try:
yield ray.init(num_cpus=16)
finally:
ray.shutdown()
def test_mars(ray_start_regular):
import pandas as pd
cluster = mars.new_cluster_in_ray(worker_num=2, worker_cpu=1)
n = 10000
pdf = pd.DataFrame({"a": list(range(n)), "b": list(range(n, 2 * n))})
df = md.DataFrame(pdf)
# Convert mars dataframe to ray dataset
ds = ray.data.from_mars(df)
pd.testing.assert_frame_equal(ds.to_pandas(), df.to_pandas())
ds2 = ds.filter(lambda row: row["a"] % 2 == 0)
assert ds2.take(5) == [{"a": 2 * i, "b": n + 2 * i} for i in range(5)]
# Convert ray dataset to mars dataframe
df2 = ds2.to_mars()
pd.testing.assert_frame_equal(
df2.head(5).to_pandas(),
pd.DataFrame({"a": list(range(0, 10, 2)), "b": list(range(n, n + 10, 2))}),
)
# Test Arrow Dataset
pdf2 = pd.DataFrame({c: range(5) for c in "abc"})
ds3 = ray.data.from_arrow([pa.Table.from_pandas(pdf2) for _ in range(3)])
df3 = ds3.to_mars()
pd.testing.assert_frame_equal(
df3.head(5).to_pandas(),
pdf2,
)
# Test simple datasets
with pytest.raises(NotImplementedError):
ray.data.range(10).to_mars()
cluster.stop()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 |
cainiaocome/scikit-learn | sklearn/tests/test_kernel_approximation.py | 242 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
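# A small sketch (illustrative, not part of the test suite) of the Monte
# Carlo nature of the map tested above: the random Fourier feature error
# typically shrinks roughly like 1 / sqrt(n_components), so increasing the
# number of components tightens the bounds asserted above.
def _demo_rff_error(n_components=2000):
    rff = RBFSampler(gamma=10., n_components=n_components, random_state=42)
    kernel_approx = np.dot(rff.fit_transform(X), rff.transform(Y).T)
    return np.max(np.abs(rbf_kernel(X, Y, gamma=10.) - kernel_approx))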
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
ray-project/ray | python/ray/util/collective/collective_group/gloo_util.py | 1 | 10286 | """Code to wrap some GLOO API calls."""
import asyncio
import time
from typing import List
import numpy
import ray
import ray.experimental.internal_kv as internal_kv
from ray._private.gcs_utils import GcsClient
from ray.util.collective.types import ReduceOp, torch_available
from ray.util.queue import _QueueActor
try:
import pygloo
except ImportError:
raise ImportError(
"Can not import pygloo. Please run 'pip install pygloo' to install pygloo."
)
GLOO_REDUCE_OP_MAP = {
ReduceOp.SUM: pygloo.ReduceOp.SUM,
ReduceOp.PRODUCT: pygloo.ReduceOp.PRODUCT,
ReduceOp.MIN: pygloo.ReduceOp.MIN,
ReduceOp.MAX: pygloo.ReduceOp.MAX,
}
NUMPY_GLOO_DTYPE_MAP = {
# INT types
numpy.int: pygloo.glooDataType_t.glooInt64,
numpy.uint8: pygloo.glooDataType_t.glooUint8,
numpy.uint32: pygloo.glooDataType_t.glooUint32,
numpy.uint64: pygloo.glooDataType_t.glooUint64,
numpy.int8: pygloo.glooDataType_t.glooInt8,
numpy.int32: pygloo.glooDataType_t.glooInt32,
numpy.int64: pygloo.glooDataType_t.glooInt64,
# FLOAT types
numpy.half: pygloo.glooDataType_t.glooFloat16,
numpy.float: pygloo.glooDataType_t.glooFloat64,
numpy.float16: pygloo.glooDataType_t.glooFloat16,
numpy.float32: pygloo.glooDataType_t.glooFloat32,
numpy.float64: pygloo.glooDataType_t.glooFloat64,
numpy.double: pygloo.glooDataType_t.glooFloat64,
}
if torch_available():
import torch
TORCH_GLOO_DTYPE_MAP = {
torch.int: pygloo.glooDataType_t.glooInt32,
torch.uint8: pygloo.glooDataType_t.glooUint8,
torch.int8: pygloo.glooDataType_t.glooInt8,
torch.int32: pygloo.glooDataType_t.glooInt32,
torch.int64: pygloo.glooDataType_t.glooInt64,
torch.long: pygloo.glooDataType_t.glooInt64,
# FLOAT types
torch.half: pygloo.glooDataType_t.glooFloat16,
torch.float: pygloo.glooDataType_t.glooFloat32,
torch.float16: pygloo.glooDataType_t.glooFloat16,
torch.float32: pygloo.glooDataType_t.glooFloat32,
torch.float64: pygloo.glooDataType_t.glooFloat64,
torch.double: pygloo.glooDataType_t.glooFloat64,
}
TORCH_NUMPY_DTYPE_MAP = {
# INT types
torch.int: numpy.int32,
torch.uint8: numpy.uint8,
torch.int8: numpy.int8,
torch.int32: numpy.int32,
torch.int64: numpy.int64,
torch.long: numpy.int64,
# FLOAT types
torch.half: numpy.half,
torch.float: numpy.float32,
torch.float16: numpy.float16,
torch.float32: numpy.float32,
torch.float64: numpy.float64,
}
def create_gloo_context(rank, world_size):
"""Create a GLOO context using GLOO APIs.
Args:
rank: the rank of this process.
world_size: the number of processes of this collective group.
Returns:
context (pygloo.Context): a GLOO context.
"""
context = pygloo.rendezvous.Context(rank, world_size)
return context
def get_gloo_reduce_op(reduce_op):
"""Map the reduce op to GLOO reduce op type.
Args:
reduce_op: ReduceOp Enum (SUM/PRODUCT/MIN/MAX).
Returns:
(pygloo.ReduceOp): the mapped GLOO reduce op.
"""
if reduce_op not in GLOO_REDUCE_OP_MAP:
raise RuntimeError("Gloo does not support reduce op: '{}'.".format(reduce_op))
return GLOO_REDUCE_OP_MAP[reduce_op]
def get_gloo_tensor_dtype(tensor):
"""Return the corresponded GLOO dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return NUMPY_GLOO_DTYPE_MAP[tensor.dtype.type]
if torch_available():
if isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return TORCH_GLOO_DTYPE_MAP[tensor.dtype]
else:
raise ValueError(
"Expect torch CPU tensor. Got {}.".format(tensor.device)
)
raise ValueError("Unsupported tensor type. Got: {}.".format(type(tensor)))
def get_numpy_tensor_dtype(tensor):
"""Return the corresponded Cupy dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.dtype.type
if torch_available():
if isinstance(tensor, torch.Tensor):
return TORCH_NUMPY_DTYPE_MAP[tensor.dtype]
raise ValueError(
"Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor))
)
def get_tensor_ptr(tensor):
"""Return the pointer to the underlying memory storage of a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.ctypes.data
if torch_available():
if isinstance(tensor, torch.Tensor):
if tensor.is_cuda:
raise RuntimeError(
"Torch tensor must be on CPU when using GLOO collectives."
)
return tensor.data_ptr()
raise ValueError(
"Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor))
)
def get_tensor_n_elements(tensor):
"""Return the number of elements in a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.size
if torch_available():
if isinstance(tensor, torch.Tensor):
return torch.numel(tensor)
raise ValueError("Unsupported tensor type. Got: {}.".format(type(tensor)))
def get_gloo_store_path(store_name):
from ray._private.utils import get_ray_temp_dir
store_path = f"{get_ray_temp_dir()}_collective/gloo/{store_name}"
return store_path
def get_tensor_device(tensor):
if isinstance(tensor, numpy.ndarray):
return "cpu"
elif torch_available() and isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return "cpu"
else:
return "cuda"
else:
raise RuntimeError("Unrecognized tensor type: '{}'.".format(type(tensor)))
def get_tensor_shape(tensor):
"""Return the shape of the tensor as a list."""
if isinstance(tensor, numpy.ndarray):
return list(tensor.shape)
if torch_available():
if isinstance(tensor, torch.Tensor):
return list(tensor.size())
raise ValueError(
"Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor))
)
def copy_tensor(dst_tensor, src_tensor):
"""Copy the content from src_tensor to dst_tensor.
Args:
        dst_tensor: the tensor to copy to.
        src_tensor: the tensor to copy from.
Returns:
None
"""
copied = True
if isinstance(dst_tensor, numpy.ndarray) and isinstance(src_tensor, numpy.ndarray):
numpy.copyto(dst_tensor, src_tensor)
elif torch_available():
if isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, torch.Tensor
):
dst_tensor.copy_(src_tensor)
elif isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, numpy.ndarray
):
t = torch.Tensor(src_tensor)
dst_tensor.copy_(t)
elif isinstance(dst_tensor, numpy.ndarray) and isinstance(
src_tensor, torch.Tensor
):
t = src_tensor.numpy()
numpy.copyto(dst_tensor, t)
else:
copied = False
else:
copied = False
if not copied:
raise ValueError(
"Unsupported tensor type. Got: {} and {}. Supported "
"CPU tensor types are: torch.Tensor, numpy.ndarray.".format(
type(dst_tensor), type(src_tensor)
)
)
# Note(Hao): this requires Ray >= 1.2.0,
# otherwise _QueueActor is an actor class.
class glooQueue(_QueueActor):
def index(self, group_name):
try:
return self.queue._queue.index(group_name)
except ValueError:
return -1
@ray.remote(num_cpus=0)
class SignalActor:
def __init__(self, world_size):
self.ready_events = [asyncio.Event() for _ in range(world_size)]
self.world_size = world_size
def send(self, rank, clear=False):
self.ready_events[rank].set()
if clear:
self.ready_events[rank].clear()
async def wait(self, should_wait=True):
if should_wait:
for i in range(self.world_size):
await self.ready_events[i].wait()
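# A minimal usage sketch (assuming an initialized Ray runtime; helper name
# made up): every rank calls ``send`` with its own id, and ``wait`` only
# returns once all ranks have signalled.
def _example_signal_barrier():
    signal = SignalActor.remote(world_size=2)
    ray.get([signal.send.remote(rank) for rank in (0, 1)])
    ray.get(signal.wait.remote())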
# A custom store implemented on top of Ray's internal kv storage, used to
# hold the rank meta information when setting up the gloo collective group.
class RayInternalKvStore:
def __init__(self, group_name: str):
self._group_name = group_name
self._job_id = ray.get_runtime_context().job_id
gcs_address = ray._private.worker._global_node.gcs_address
self._gcs_client = GcsClient(address=gcs_address, nums_reconnect_retry=10)
internal_kv._initialize_internal_kv(self._gcs_client)
def set(self, key: str, data: bytes) -> bool:
key = self.__concat_key_with_prefixes(key)
ret = internal_kv._internal_kv_put(key, data)
return ret
def get(self, key: str) -> bytes:
key = self.__concat_key_with_prefixes(key)
ret = internal_kv._internal_kv_get(key)
return ret
def delete(self, key: str) -> int:
key = self.__concat_key_with_prefixes(key)
ret = internal_kv._internal_kv_del(key)
return ret
def del_keys(self, keys: List[str]) -> List[int]:
results = []
for key in keys:
results.append(self.delete(key))
return results
def wait(self, keys: List[str]):
while True:
all_exist = True
for key in keys:
key = self.__concat_key_with_prefixes(key)
result = internal_kv._internal_kv_exists(key)
if not result:
all_exist = False
break
if all_exist:
return True
time.sleep(1)
def __concat_key_with_prefixes(self, original_key):
"""Concat the necessary prefixes and key for isolation purpose for
different jobs and different groups."""
return f"{self._job_id.hex()}-{self._group_name}-{original_key}"
| apache-2.0 |
yyjiang/scikit-learn | examples/ensemble/plot_forest_iris.py | 332 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | examples/applications/plot_prediction_latency.py | 83 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
    runtimes = runtimes / float(n_instances)
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes)
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
        plt.plot(x, y, color=colors[i])
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
theoryno3/scikit-learn | examples/manifold/plot_lle_digits.py | 180 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation to which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
ZRiddle/BoostARoota | Dev_Testing/FS_Brute_Force.py | 1 | 7786 | #Starting with a 0/1 classification task
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from boruta import BorutaPy
from random import randint
import os
os.chdir('/home/chase.dehan/source/DataScience/Tools/Academic/Variable Selection')
########################################################################################
#
# Preprocessing
#
########################################################################################
#Bring in data and split into X and Y
lsvt = pd.read_csv("LSVT_VR.csv")
# Data came from here: https://archive.ics.uci.edu/ml/datasets/LSVT+Voice+Rehabilitation
X = lsvt[lsvt.columns[1:lsvt.shape[1]]]
Y = lsvt[lsvt.columns[0]] - 1
#The next step would be doing an initial ranking and split-out of the variables,
#but we will skip that for now
#Split into a test/train
seed = 1228
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
########################################################################################
#
# Running the model - ALL DATA
#
########################################################################################
# fit model no training data
model = XGBClassifier()
model.fit(X_train, y_train)
# make predictions for test data
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
########################################################################################
#
# Run with Boruta
#
########################################################################################
# define random forest classifier, with utilising all cores and
# sampling in proportion to y labels
rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5)  #'auto' is no longer accepted by sklearn; 'balanced' is the equivalent
# define Boruta feature selection method
feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=1)
# find all relevant features
feat_selector.fit(X_train.values, y_train.values)
# call transform() on X to filter it down to selected features
X_train_filter = feat_selector.transform(X_train.values)
X_test_filter = feat_selector.transform(X_test.values)
# fit model to training data
model = XGBClassifier()
model.fit(X_train_filter, y_train)
# make predictions for test data
y_pred = model.predict(X_test_filter)
boruta_pred = [round(value) for value in y_pred]
# evaluate predictions
boruta_acc = accuracy_score(y_test, boruta_pred)
#Compare Boruta to all_features
print("Boruta Accuracy: %.2f%%" % (boruta_acc * 100.0))
print("All Feature Accuracy: %.2f%%" % (accuracy * 100.0))
#Boruta outperforms using 26 variables versus 310 in all features
#Performs pretty well
########################################################################################
#
# First phase - Create random subsets of variables and run through a model
#
########################################################################################
ncol = X_train.shape[1]
#Generate random samples of variables
var_list = []
num_iterations = 100
for i in np.arange(num_iterations):
    num_vars = randint(10, 50)  #TODO: scale the max subset size with the dataset rather than hardcoding it
var_list.append( np.random.choice(ncol, num_vars, replace=False) )
#Then evaluate the performance of each model:
model = XGBClassifier()
#Split out the train/test from the above, because I created a validation
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_train, y_train, test_size=test_size, random_state=seed)
#Declare empty dataframe to be filled
eval_results = pd.DataFrame(index=np.arange(num_iterations), columns=np.insert(X_train2.columns.values, 0, ["Iteration", "Accuracy"]) ) #declare an empty list to place output of each into
for i in np.arange(num_iterations):
these_vars = list(X_train2.columns[var_list[i]])
X_train_filter = X_train2[these_vars]
X_test_filter = X_test2[these_vars]
#Fit and make predictions off the data
model.fit(X_train_filter, y_train2)
predictions = [round(value) for value in model.predict(X_test_filter)]
acc_score = accuracy_score(y_test2, predictions)
#Set the values of the scores and place into a larger
var_scores = []
for j in np.arange(ncol):
if X_train2.columns[j] not in these_vars:
value = -1
else:
value_index = these_vars.index(X_train2.columns[j])
value = model.feature_importances_[value_index]
var_scores.append(value)
eval_results.loc[i] = np.insert(var_scores, 0, [i, acc_score])
eval_results = eval_results.sort_values("Accuracy")  #ascending sort: best models are last
#Ok, I now have results for each of the models
#The timing of running through these models is pretty fast, much faster than 100 iterations on Boruta
#Just going to run the model on the top model from eval_results
a = list(eval_results.tail(11).loc[:, "Iteration"])
top_values = [int(x) for x in a]
#top_values = [120, 373, 225, 807, 121]
preds = pd.DataFrame()
for i in top_values:
these_vars = list(X_train.columns[var_list[i]])
X_train_filter = X_train[these_vars]
X_test_filter = X_test[these_vars]
model.fit(X_train_filter, y_train)
predictions = [round(value) for value in model.predict(X_test_filter)]
preds.insert(0, i, predictions)
preds.insert(0, "combined", np.where(preds.sum(1) >= 6, 1, 0))
ensemble_brute_acc = accuracy_score(y_test, preds["combined"])
#Then, just taking the top model (the last entry after the ascending sort)
these_vars = list(X_train.columns[var_list[top_values[-1]]])
X_train_filter = X_train[these_vars]
X_test_filter = X_test[these_vars]
model.fit(X_train_filter, y_train)
predictions = [round(value) for value in model.predict(X_test_filter)]
brute_force_acc = accuracy_score(y_test, predictions)
print("Brute Force Accuracy: %.2f%%" % (brute_force_acc * 100.0))
print("Boruta Accuracy: %.2f%%" % (boruta_acc * 100.0))
print("All Feature Accuracy: %.2f%%" % (accuracy * 100.0))
print("Ensemble Brute Accuracy: %.2f%%" % (ensemble_brute_acc * 100.0))
#I now know that the brute force approach can outperform Boruta, but it is an expensive process
#And could take an immense amount of time to force through it, especially on large datasets
########################################################################################
#
# Second Phase - Determine which variables are important
#
########################################################################################
#This should follow some sort of optimization process, where the algo automatically selects variables
#Want to rank the models for each iteration
#And rank the variables according to relative scales
#Variable rankings from highest value to lowest
# High ranking in high scoring models
# High ranking with low scores
# Low on most, but high in at least 1, or in top model(s)
#Looking for non linear interactions
# Low ranking in high scoring models
# Low or no ranking in only poor models
#So, how do we move towards the objective?
#Look at the relative scores - how many 0 values are there?
#Importances will add up to 1
#Should I normalize for num variables?
#Take average of the scores
#Find places where two variables together are above their averages
#These variables should probably be tested together
#Make some new combinations:
# Make it probabilistic
# What combinations are most likely to yield higher performance?
# Add these points and recalculate the probabilities
# Repeat
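#A minimal sketch of the scoring idea above. The helper name and its output
#format are illustrative (not part of this repo); it assumes `eval_results`
#from Phase 1, where -1 marks a variable that was absent from that subset.
def summarize_importances(eval_results, feature_names):
    """Average each variable's importance over the subsets that included it."""
    imp = eval_results[feature_names].astype(float)
    included = imp >= 0
    mean_imp = imp.where(included).mean()  #excluded runs become NaN and are skipped
    inclusion_rate = included.mean()       #fraction of subsets containing each variable
    return pd.DataFrame({'mean_importance': mean_imp,
                         'inclusion_rate': inclusion_rate})
#Example (hypothetical): rank variables by their average importance
#ranked = summarize_importances(eval_results, list(X_train2.columns))
#print(ranked.sort_values('mean_importance', ascending=False).head(20))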
| mit |
cbentivoglio/neurolearn_clone | build/lib/nltools/analysis.py | 2 | 25878 | '''
NeuroLearn Analysis Tools
=========================
These tools provide the ability to quickly run
machine-learning analyses on imaging data
Author: Luke Chang
License: MIT
'''
# ToDo
# 1) add roc functionality for classification
# 2) add thresholding functionality
# 3) add bootstrapping functionality
# 4) add tests
# 5) add within subject checks and plots
# 6) Plot probabilities
import os
import importlib
import nibabel as nib
import sklearn
from sklearn.pipeline import Pipeline
from nilearn.input_data import NiftiMasker
import pandas as pd
import numpy as np
from nilearn.plotting import plot_stat_map
import seaborn as sns
import matplotlib.pyplot as plt
from nltools.plotting import dist_from_hyperplane_plot, scatterplot, probability_plot, roc_plot
from nltools.stats import pearson
from scipy.stats import norm, binom_test
from sklearn.metrics import auc
# Paths
resource_dir = os.path.join(os.path.dirname(__file__),'resources')
class Predict:
def __init__(self, data, Y, subject_id = None, algorithm=None, cv_dict=None, mask=None,
output_dir='.', **kwargs):
""" Initialize Predict.
Args:
data: nibabel data instance
Y: vector of training labels
subject_id: vector of labels corresponding to each subject
algorithm: Algorithm to use for prediction. Must be one of 'svm', 'svr',
'linear', 'logistic', 'lasso', 'ridge', 'ridgeClassifier','randomforest',
or 'randomforestClassifier'
            cv_dict: Type of cross_validation to use. A dictionary of
                {'kfolds': 5} or {'loso': subject_id}.
mask: binary nibabel mask
output_dir: Directory to use for writing all outputs
**kwargs: Additional keyword arguments to pass to the prediction algorithm
"""
self.output_dir = output_dir
if subject_id is not None:
self.subject_id = subject_id
if mask is not None:
if type(mask) is not nib.nifti1.Nifti1Image:
raise ValueError("mask is not a nibabel instance")
self.mask = mask
else:
self.mask = nib.load(os.path.join(resource_dir,'MNI152_T1_2mm_brain_mask_dil.nii.gz'))
if type(data) is not nib.nifti1.Nifti1Image:
raise ValueError("data is not a nibabel instance")
        self.nifti_masker = NiftiMasker(mask_img=self.mask)  # use the default mask when none was supplied
self.data = self.nifti_masker.fit_transform(data)
        if self.data.shape[0] != len(Y):
raise ValueError("Y does not match the correct size of data")
self.Y = Y
if algorithm is not None:
self.set_algorithm(algorithm, **kwargs)
if cv_dict is not None:
self.set_cv(cv_dict)
def predict(self, algorithm=None, cv_dict=None, save_images=True, save_output=True,
save_plot = True, **kwargs):
""" Run prediction
Args:
algorithm: Algorithm to use for prediction. Must be one of 'svm', 'svr',
'linear', 'logistic', 'lasso', 'ridge', 'ridgeClassifier','randomforest',
or 'randomforestClassifier'
            cv_dict: Type of cross_validation to use. A dictionary of
                {'kfolds': 5} or {'loso': subject_id}.
save_images: Boolean indicating whether or not to save images to file.
save_output: Boolean indicating whether or not to save prediction output to file.
save_plot: Boolean indicating whether or not to create plots.
**kwargs: Additional keyword arguments to pass to the prediction algorithm
"""
if algorithm is not None:
self.set_algorithm(algorithm, **kwargs)
if self.algorithm is None:
raise ValueError("Make sure you specify an 'algorithm' to use.")
# Overall Fit for weight map
predicter = self.predicter
predicter.fit(self.data, self.Y)
self.yfit = predicter.predict(self.data) # will be overwritten if xvalidating
if save_images:
self._save_image(predicter)
# Cross-Validation Fit
if cv_dict is not None:
self.set_cv(cv_dict)
if hasattr(self,'cv'):
predicter_cv = self.predicter
            if self.prediction_type == 'classification':
if self.algorithm not in ['svm','ridgeClassifier','ridgeClassifierCV']:
self.prob = np.zeros(len(self.Y))
else:
xval_dist_from_hyperplane = np.zeros(len(self.Y))
                    if self.algorithm == 'svm' and self.predicter.probability:
self.prob = np.zeros(len(self.Y))
for train, test in self.cv:
predicter_cv.fit(self.data[train], self.Y[train])
self.yfit[test] = predicter_cv.predict(self.data[test])
                if self.prediction_type == 'classification':
if self.algorithm not in ['svm','ridgeClassifier','ridgeClassifierCV']:
self.prob[test] = predicter_cv.predict_proba(self.data[test])
else:
xval_dist_from_hyperplane[test] = predicter_cv.decision_function(self.data[test])
                        if self.algorithm == 'svm' and self.predicter.probability:
self.prob[test] = predicter_cv.predict_proba(self.data[test])
if save_output:
self.stats_output = pd.DataFrame({
'SubID' : self.subject_id,
'Y' : self.Y,
'yfit' : self.yfit})
                if self.prediction_type == 'classification':
if self.algorithm not in ['svm','ridgeClassifier','ridgeClassifierCV']:
self.stats_output['Probability'] = self.prob
else:
self.stats_output['xval_dist_from_hyperplane']=xval_dist_from_hyperplane
                        if self.algorithm == 'svm' and self.predicter.probability:
self.stats_output['Probability'] = self.prob
self._save_stats_output()
if save_plot:
self._save_plot(predicter_cv)
            if self.prediction_type == 'classification':
                self.mcr = np.mean(self.yfit == self.Y)
                print('overall CV accuracy: %.2f' % self.mcr)
            elif self.prediction_type == 'prediction':
                self.rmse = np.sqrt(np.mean((self.yfit - self.Y) ** 2))
                self.r = np.corrcoef(self.Y, self.yfit)[0, 1]
                print('overall Root Mean Squared Error: %.2f' % self.rmse)
                print('overall Correlation: %.2f' % self.r)
def set_algorithm(self, algorithm, **kwargs):
""" Set the algorithm to use in subsequent prediction analyses.
Args:
algorithm: The prediction algorithm to use. Either a string or an (uninitialized)
scikit-learn prediction object. If string, must be one of 'svm','svr', linear',
'logistic','lasso','lassopcr','lassoCV','ridge','ridgeCV','ridgeClassifier',
'randomforest', or 'randomforestClassifier'
kwargs: Additional keyword arguments to pass onto the scikit-learn clustering
object.
"""
self.algorithm = algorithm
def load_class(import_string):
class_data = import_string.split(".")
module_path = '.'.join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
algs_classify = {
'svm':'sklearn.svm.SVC',
'logistic':'sklearn.linear_model.LogisticRegression',
'ridgeClassifier':'sklearn.linear_model.RidgeClassifier',
'ridgeClassifierCV':'sklearn.linear_model.RidgeClassifierCV',
'randomforestClassifier':'sklearn.ensemble.RandomForestClassifier'
}
algs_predict = {
'svr':'sklearn.svm.SVR',
'linear':'sklearn.linear_model.LinearRegression',
'lasso':'sklearn.linear_model.Lasso',
'lassoCV':'sklearn.linear_model.LassoCV',
'ridge':'sklearn.linear_model.Ridge',
'ridgeCV':'sklearn.linear_model.RidgeCV',
'randomforest':'sklearn.ensemble.RandomForest'
}
        if algorithm in algs_classify:
self.prediction_type = 'classification'
alg = load_class(algs_classify[algorithm])
self.predicter = alg(**kwargs)
elif algorithm in algs_predict:
self.prediction_type = 'prediction'
alg = load_class(algs_predict[algorithm])
self.predicter = alg(**kwargs)
        elif algorithm == 'lassopcr':
self.prediction_type = 'prediction'
from sklearn.linear_model import Lasso
from sklearn.decomposition import PCA
self._lasso = Lasso()
self._pca = PCA()
self.predicter = Pipeline(steps=[('pca', self._pca), ('lasso', self._lasso)])
        elif algorithm == 'pcr':
self.prediction_type = 'prediction'
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
self._regress = LinearRegression()
self._pca = PCA()
self.predicter = Pipeline(steps=[('pca', self._pca), ('regress', self._regress)])
else:
raise ValueError("""Invalid prediction/classification algorithm name. Valid
options are 'svm','svr', 'linear', 'logistic', 'lasso', 'lassopcr',
'lassoCV','ridge','ridgeCV','ridgeClassifier', 'randomforest', or
'randomforestClassifier'.""")
def set_cv(self, cv_dict):
""" Set the CV algorithm to use in subsequent prediction analyses.
Args:
            cv_dict: Type of cross_validation to use. A dictionary of {'kfolds': 5} or {'loso': subject_id}.
"""
        if isinstance(cv_dict, dict):
            cv_type = list(cv_dict.keys())[0]
            if cv_type == 'kfolds':
                from sklearn.cross_validation import StratifiedKFold
                self.cv = StratifiedKFold(self.Y, n_folds=list(cv_dict.values())[0])
            elif cv_type == 'loso':
                from sklearn.cross_validation import LeaveOneLabelOut
                self.cv = LeaveOneLabelOut(labels=list(cv_dict.values())[0])
            else:
                raise ValueError("Make sure you specify a dictionary of {'kfolds': 5} or {'loso': subject_id}.")
        else:
            raise ValueError("Make sure 'cv_dict' is a dictionary.")
def _save_image(self, predicter):
""" Write out weight map to Nifti image.
Args:
predicter: predicter instance
Returns:
predicter_weightmap.nii.gz: Will output a nifti image of weightmap
"""
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
        if self.algorithm == 'lassopcr':
coef = np.dot(self._pca.components_.T,self._lasso.coef_)
coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
        elif self.algorithm == 'pcr':
coef = np.dot(self._pca.components_.T,self._regress.coef_)
coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
else:
coef_img = self.nifti_masker.inverse_transform(predicter.coef_.squeeze())
nib.save(coef_img, os.path.join(self.output_dir, self.algorithm + '_weightmap.nii.gz'))
def _save_stats_output(self):
""" Write stats output to csv file.
Args:
stats_output: a pandas file with prediction output
Returns:
predicter_stats_output.csv: Will output a csv file of stats output
"""
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
self.stats_output.to_csv(os.path.join(self.output_dir, self.algorithm + '_Stats_Output.csv'))
def _save_plot(self, predicter):
""" Save Plots.
Args:
predicter: predicter instance
Returns:
predicter_weightmap_montage.png: Will output a montage of axial slices of weightmap
predicter_prediction.png: Will output a plot of prediction
"""
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
        if self.algorithm == 'lassopcr':
coef = np.dot(self._pca.components_.T,self._lasso.coef_)
coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
        elif self.algorithm == 'pcr':
coef = np.dot(self._pca.components_.T,self._regress.coef_)
coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
else:
coef_img = self.nifti_masker.inverse_transform(predicter.coef_)
overlay_img = nib.load(os.path.join(resource_dir,'MNI152_T1_2mm_brain.nii.gz'))
fig1 = plot_stat_map(coef_img, overlay_img, title=self.algorithm + " weights",
cut_coords=range(-40, 40, 10), display_mode='z')
fig1.savefig(os.path.join(self.output_dir, self.algorithm + '_weightmap_axial.png'))
if self.prediction_type == 'classification':
if self.algorithm not in ['svm','ridgeClassifier','ridgeClassifierCV']:
fig2 = probability_plot(self.stats_output)
fig2.savefig(os.path.join(self.output_dir, self.algorithm + '_prob_plot.png'))
else:
fig2 = dist_from_hyperplane_plot(self.stats_output)
fig2.savefig(os.path.join(self.output_dir, self.algorithm +
'_xVal_Distance_from_Hyperplane.png'))
                if self.algorithm == 'svm' and self.predicter.probability:
fig3 = probability_plot(self.stats_output)
fig3.savefig(os.path.join(self.output_dir, self.algorithm + '_prob_plot.png'))
elif self.prediction_type == 'prediction':
fig2 = scatterplot(self.stats_output)
fig2.savefig(os.path.join(self.output_dir, self.algorithm + '_scatterplot.png'))
def apply_mask(data=None, weight_map=None, mask=None, method='dot_product', save_output=False, output_dir='.'):
""" Apply Nifti weight map to Nifti Images.
Args:
data: nibabel instance of data to be applied
weight_map: nibabel instance of weight map
mask: binary nibabel mask
method: type of pattern expression (e.g,. 'dot_product','correlation')
save_output: Boolean indicating whether or not to save output to csv file.
output_dir: Directory to use for writing all outputs
**kwargs: Additional parameters to pass
Returns:
pexp: Outputs a vector of pattern expression values
"""
if mask is not None:
if type(mask) is not nib.nifti1.Nifti1Image:
raise ValueError("Mask is not a nibabel instance")
else:
mask = nib.load(os.path.join(resource_dir,'MNI152_T1_2mm_brain_mask_dil.nii.gz'))
if type(data) is not nib.nifti1.Nifti1Image:
raise ValueError("Data is not a nibabel instance")
nifti_masker = NiftiMasker(mask_img=mask)
data_masked = nifti_masker.fit_transform(data)
if type(weight_map) is not nib.nifti1.Nifti1Image:
raise ValueError("Weight_map is not a nibabel instance")
weight_map_masked = nifti_masker.fit_transform(weight_map)
# Calculate pattern expression
    if method == 'dot_product':
        pexp = np.dot(data_masked, np.transpose(weight_map_masked)).squeeze()
    elif method == 'correlation':
        pexp = pearson(data_masked, weight_map_masked)
    else:
        raise ValueError("method must be 'dot_product' or 'correlation'")
if save_output:
np.savetxt(os.path.join(output_dir,"Pattern_Expression_" + method + ".csv"), pexp, delimiter=",")
return pexp
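# A toy illustration of the two pattern-expression methods above, with plain
# numpy arrays standing in for the masked data (2 images x 3 voxels) and the
# weight map (1 x 3 voxels); the numbers are purely illustrative:
#
# >>> data_masked = np.array([[1., 2., 3.], [0., 1., 0.]])
# >>> weight_map_masked = np.array([[0.5, 0.5, 0.]])
# >>> np.dot(data_masked, np.transpose(weight_map_masked)).squeeze()
# array([ 1.5,  0.5])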
class Roc:
def __init__(self, input_values=None, binary_outcome=None, threshold_type='optimal_overall',
forced_choice=False, **kwargs):
""" Initialize Roc instance. Object-Oriented version based on Tor Wager's Matlab roc_plot.m function
Args:
input_values: nibabel data instance
binary_outcome: vector of training labels
threshold_type: ['optimal_overall', 'optimal_balanced','minimum_sdt_bias']
**kwargs: Additional keyword arguments to pass to the prediction algorithm
"""
if len(input_values) != len(binary_outcome):
raise ValueError("Data Problem: input_value and binary_outcome are different lengths.")
        if not any(binary_outcome):
            raise ValueError("Data Problem: binary_outcome must be a boolean vector with at least one True value.")
thr_type = ['optimal_overall', 'optimal_balanced','minimum_sdt_bias']
if threshold_type not in thr_type:
raise ValueError("threshold_type must be ['optimal_overall', 'optimal_balanced','minimum_sdt_bias']")
self.input_values = input_values
self.binary_outcome = binary_outcome
self.threshold_type = threshold_type
self.forced_choice=forced_choice
def calculate(self, input_values=None, binary_outcome=None, criterion_values=None,
threshold_type='optimal_overall', forced_choice=False, balanced_acc=False):
""" Calculate Receiver Operating Characteristic plot (ROC) for single-interval
classification.
Args:
input_values: nibabel data instance
binary_outcome: vector of training labels
criterion_values: (optional) criterion values for calculating fpr & tpr
threshold_type: ['optimal_overall', 'optimal_balanced','minimum_sdt_bias']
forced_choice: within-subject forced classification (bool). Data must be
stacked on top of each other (e.g., [1 1 1 0 0 0]).
balanced_acc: balanced accuracy for single-interval classification (bool)
**kwargs: Additional keyword arguments to pass to the prediction algorithm
"""
if input_values is not None:
self.input_values = input_values
if binary_outcome is not None:
self.binary_outcome = binary_outcome
# Create Criterion Values
if criterion_values is not None:
self.criterion_values = criterion_values
else:
self.criterion_values = np.linspace(min(self.input_values), max(self.input_values), num=50*len(self.binary_outcome))
        if forced_choice or self.forced_choice:
            self.forced_choice = True
            # center each true/false pair of scores on the pair mean
            mn_scores = (self.input_values[self.binary_outcome] + self.input_values[~self.binary_outcome]) / 2
            self.input_values[self.binary_outcome] = self.input_values[self.binary_outcome] - mn_scores
            self.input_values[~self.binary_outcome] = self.input_values[~self.binary_outcome] - mn_scores
            self.class_thr = 0
# Calculate true positive and false positive rate
self.tpr = np.zeros(self.criterion_values.shape)
self.fpr = np.zeros(self.criterion_values.shape)
for i,x in enumerate(self.criterion_values):
wh = self.input_values >= x
self.tpr[i] = float(sum(wh[self.binary_outcome]))/float(sum(self.binary_outcome))
self.fpr[i] = float(sum(wh[~self.binary_outcome]))/float(sum(~self.binary_outcome))
self.n_true = float(sum(self.binary_outcome))
self.n_false = float(sum(~self.binary_outcome))
# Calculate Area Under the Curve
# fix for AUC = 1 if no overlap - code not working (tpr_unique and fpr_unique can be different lengths)
# fpr_unique = np.unique(self.fpr)
# tpr_unique = np.unique(self.tpr)
# if any((fpr_unique == 0) & (tpr_unique == 1)):
# self.auc = 1 # Fix for AUC = 1 if no overlap;
# else:
# self.auc = auc(self.fpr, self.tpr) # Use sklearn auc otherwise
self.auc = auc(self.fpr, self.tpr) # Use sklearn auc
# Get criterion threshold
if not self.forced_choice:
self.threshold_type = threshold_type
            if threshold_type == 'optimal_balanced':
                # balanced accuracy = mean of sensitivity and specificity
                mn = (self.tpr + (1 - self.fpr)) / 2
                self.class_thr = self.criterion_values[np.argmax(mn)]
            elif threshold_type == 'optimal_overall':
                n_corr_t = self.tpr * self.n_true
                n_corr_f = (1 - self.fpr) * self.n_false
                sm = (n_corr_t + n_corr_f)
                self.class_thr = self.criterion_values[np.argmax(sm)]
            elif threshold_type == 'minimum_sdt_bias':
# Calculate MacMillan and Creelman 2005 Response Bias (c_bias)
c_bias = ( norm.ppf(np.maximum(.0001, np.minimum(0.9999, self.tpr))) + norm.ppf(np.maximum(.0001, np.minimum(0.9999, self.fpr))) ) / float(2)
self.class_thr = self.criterion_values[np.argmin(abs(c_bias))]
# Calculate output
self.false_positive = (self.input_values >= self.class_thr) & (~self.binary_outcome)
self.false_negative = (self.input_values < self.class_thr) & (self.binary_outcome)
self.misclass = (self.false_negative) | (self.false_positive)
self.true_positive = (self.binary_outcome) & (~self.misclass)
self.true_negative = (~self.binary_outcome) & (~self.misclass)
self.sensitivity = sum(self.input_values[self.binary_outcome] >= self.class_thr)/self.n_true
self.specificity = 1 - sum(self.input_values[~self.binary_outcome] >= self.class_thr)/self.n_false
self.ppv = float(sum(self.true_positive))/(float(sum(self.true_positive)) + float(sum(self.false_positive)))
if self.forced_choice:
self.true_positive = self.true_positive[self.binary_outcome]
self.true_negative = self.true_negative[~self.binary_outcome]
self.false_negative = self.false_negative[self.binary_outcome]
self.false_positive = self.false_positive[~self.binary_outcome]
self.misclass = (self.false_positive) | (self.false_negative)
# Calculate Accuracy
if balanced_acc:
self.accuracy = np.mean([self.sensitivity,self.specificity]) #See Brodersen, Ong, Stephan, Buhmann (2010)
else:
self.accuracy = 1 - np.mean(self.misclass)
# Calculate p-Value using binomial test (can add hierarchical version of binomial test)
self.n = len(self.misclass)
self.accuracy_p = binom_test(int(sum(~self.misclass)), self.n, p=.5)
        self.accuracy_se = np.sqrt(float(np.mean(~self.misclass)) * (1 - float(np.mean(~self.misclass))) / self.n)
def plot(self, plot_method = 'gaussian'):
""" Create ROC Plot
Create a specific kind of ROC curve plot, based on input values
along a continuous distribution and a binary outcome variable (logical).
Args:
plot_method: type of plot ['gaussian','observed']
binary_outcome: vector of training labels
**kwargs: Additional keyword arguments to pass to the prediction algorithm
"""
self.calculate() # Calculate ROC parameters
        if plot_method == 'gaussian':
if self.forced_choice:
diff_scores = self.input_values[self.binary_outcome] - self.input_values[~self.binary_outcome]
mn_diff = np.mean(diff_scores)
d = mn_diff / np.std(diff_scores)
                pooled_sd = np.std(diff_scores) / np.sqrt(2)
d_a_model = mn_diff / pooled_sd
x = np.arange(-3,3,.1)
tpr_smooth = 1 - norm.cdf(x, d, 1)
fpr_smooth = 1 - norm.cdf(x, -d, 1)
else:
mn_true = np.mean(self.input_values[self.binary_outcome])
mn_false = np.mean(self.input_values[~self.binary_outcome])
var_true = np.var(self.input_values[self.binary_outcome])
var_false = np.var(self.input_values[~self.binary_outcome])
                pooled_sd = np.sqrt((var_true * (self.n_true - 1) +
                                     var_false * (self.n_false - 1)) /
                                    (self.n_true + self.n_false - 2))
d = (mn_true-mn_false)/pooled_sd
z_true = mn_true/pooled_sd
z_false = mn_false/pooled_sd
x = np.arange(z_false-3,z_true+3,.1)
tpr_smooth = 1-(norm.cdf(x, z_true,1))
fpr_smooth = 1-(norm.cdf(x, z_false,1))
roc_plot(fpr_smooth,tpr_smooth)
        elif plot_method == 'observed':
roc_plot(self.fpr, self.tpr)
else:
raise ValueError("plot_method must be 'gaussian' or 'observed'")
def summary(self):
""" Display a formatted summary of ROC analysis.
"""
print("------------------------")
print(".:ROC Analysis Summary:.")
print("------------------------")
print("{:20s}".format("Accuracy:") + "{:.2f}".format(self.accuracy))
print("{:20s}".format("Accuracy SE:") + "{:.2f}".format(self.accuracy_se))
print("{:20s}".format("Accuracy p-value:") + "{:.2f}".format(self.accuracy_p))
print("{:20s}".format("Sensitivity:") + "{:.2f}".format(self.sensitivity))
print("{:20s}".format("Specificity:") + "{:.2f}".format(self.specificity))
print("{:20s}".format("AUC:") + "{:.2f}".format(self.auc))
print("{:20s}".format("PPV:") + "{:.2f}".format(self.ppv))
print("------------------------")
| mit |
glennq/scikit-learn | sklearn/linear_model/ridge.py | 13 | 51357 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
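    # Closed-form ridge solution via the SVD X = U * diag(s) * Vt:
    # coef = V * diag(s / (s**2 + alpha)) * U.T * y, computed per alpha;
    # near-zero singular values are dropped for numerical stability.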
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
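# A minimal usage sketch for ridge_regression (synthetic data; the solver
# choice here is illustrative):
#
# rng = np.random.RandomState(0)
# X = rng.randn(20, 5)
# y = rng.randn(20)
# coef = ridge_regression(X, y, alpha=1.0, solver='cholesky')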
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
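Examples
--------
A minimal usage sketch on synthetic data; the fitted output is only
illustrative, so the doctest is skipped.
>>> import numpy as np
>>> from sklearn.linear_model import RidgeClassifier
>>> X = np.random.randn(10, 5)
>>> y = np.array([0, 1] * 5)
>>> clf = RidgeClassifier(alpha=1.0).fit(X, y)  # doctest: +SKIP
>>> clf.predict(X[:2])  # doctest: +SKIP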
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
w[v == 0] = 0
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
            # to emulate fit_intercept=True situation, add a column of ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
        # detect columns collinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
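def _loo_identity_demo():
    # Illustrative sketch only, not part of scikit-learn's API: a brute-force
    # check of the leave-one-out identity quoted in the _RidgeGCV notes,
    # looe = y - loov = c / diag(G), with G = (K + alpha*Id)^-1 and c = G y.
    # It assumes plain kernel ridge with no intercept and no sample weights;
    # all data below is synthetic.
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    alpha = 1.0
    K = X.dot(X.T)
    G = linalg.inv(K + alpha * np.eye(20))
    looe_fast = G.dot(y) / np.diag(G)
    looe_slow = np.empty(20)
    for i in range(20):
        mask = np.arange(20) != i
        # Refit on all samples but i, then predict sample i.
        G_i = linalg.inv(K[np.ix_(mask, mask)] + alpha * np.eye(19))
        looe_slow[i] = y[i] - K[i, mask].dot(G_i.dot(y[mask]))
    assert np.allclose(looe_fast, looe_slow)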
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv,
scoring=self.scoring)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not a sparse
matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
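Examples
--------
A minimal usage sketch on synthetic data; the selected ``alpha_`` is
only illustrative, so the doctest is skipped.
>>> import numpy as np
>>> from sklearn.linear_model import RidgeCV
>>> X = np.random.randn(20, 4)
>>> y = np.random.randn(20)
>>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)  # doctest: +SKIP
>>> reg.alpha_  # doctest: +SKIP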
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
sshleifer/object_detection_kitti | differential_privacy/dp_sgd/dp_mnist/dp_mnist.py | 15 | 21114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example differentially private trainer and evaluator for MNIST.
"""
from __future__ import division
import json
import os
import sys
import time
import numpy as np
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import dp_pca
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.privacy_accountant.tf import accountant
# parameters for the training
tf.flags.DEFINE_integer("batch_size", 600,
"The training batch size.")
tf.flags.DEFINE_integer("batches_per_lot", 1,
"Number of batches per lot.")
# Together, batch_size and batches_per_lot determine lot_size.
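# For example, with the defaults batch_size=600 and batches_per_lot=1,
# each training step consumes one lot of 600 examples, so one epoch over
# the 60000 MNIST training images corresponds to 100 steps.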
tf.flags.DEFINE_integer("num_training_steps", 50000,
"The number of training steps."
"This counts number of lots.")
tf.flags.DEFINE_bool("randomize", True,
"If true, randomize the input data; otherwise use a fixed "
"seed and non-randomized input.")
tf.flags.DEFINE_bool("freeze_bottom_layers", False,
"If true, only train on the logit layer.")
tf.flags.DEFINE_bool("save_mistakes", False,
"If true, save the mistakes made during testing.")
tf.flags.DEFINE_float("lr", 0.05, "start learning rate")
tf.flags.DEFINE_float("end_lr", 0.05, "end learning rate")
tf.flags.DEFINE_float("lr_saturate_epochs", 0,
"learning rate saturate epochs; set to 0 for a constant "
"learning rate of --lr.")
# For searching parameters
tf.flags.DEFINE_integer("projection_dimensions", 60,
"PCA projection dimensions, or 0 for no projection.")
tf.flags.DEFINE_integer("num_hidden_layers", 1,
"Number of hidden layers in the network")
tf.flags.DEFINE_integer("hidden_layer_num_units", 1000,
"Number of units per hidden layer")
tf.flags.DEFINE_float("default_gradient_l2norm_bound", 4.0, "norm clipping")
tf.flags.DEFINE_integer("num_conv_layers", 0,
"Number of convolutional layers to use.")
tf.flags.DEFINE_string("training_data_path",
"/tmp/mnist/mnist_train.tfrecord",
"Location of the training data.")
tf.flags.DEFINE_string("eval_data_path",
"/tmp/mnist/mnist_test.tfrecord",
"Location of the eval data.")
tf.flags.DEFINE_integer("eval_steps", 10,
"Evaluate the model every eval_steps")
# Parameters for privacy spending. We allow linearly varying eps during
# training.
tf.flags.DEFINE_string("accountant_type", "Moments", "Moments, Amortized.")
# Flags that control privacy spending during training.
tf.flags.DEFINE_float("eps", 1.0,
"Start privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("end_eps", 1.0,
"End privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("eps_saturate_epochs", 0,
"Stop varying epsilon after eps_saturate_epochs. Set to "
"0 for constant eps of --eps. "
"Used if accountant_type is Amortized.")
tf.flags.DEFINE_float("delta", 1e-5,
"Privacy spending for training. Constant through "
"training, used if accountant_type is Amortized.")
tf.flags.DEFINE_float("sigma", 4.0,
"Noise sigma, used only if accountant_type is Moments")
# Flags that control privacy spending for the pca projection
# (only used if --projection_dimensions > 0).
tf.flags.DEFINE_float("pca_eps", 0.5,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_delta", 0.005,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_sigma", 7.0,
"Noise sigma for PCA, used if accountant_type is Moments")
tf.flags.DEFINE_string("target_eps", "0.125,0.25,0.5,1,2,4,8",
"Log the privacy loss for the target epsilon's. Only "
"used when accountant_type is Moments.")
tf.flags.DEFINE_float("target_delta", 1e-5,
"Maximum delta for --terminate_based_on_privacy.")
tf.flags.DEFINE_bool("terminate_based_on_privacy", False,
"Stop training if privacy spent exceeds "
"(max(--target_eps), --target_delta), even "
"if --num_training_steps have not yet been completed.")
tf.flags.DEFINE_string("save_path", "/tmp/mnist_dir",
"Directory for saving model outputs.")
FLAGS = tf.flags.FLAGS
NUM_TRAINING_IMAGES = 60000
NUM_TESTING_IMAGES = 10000
IMAGE_SIZE = 28
def MnistInput(mnist_data_file, batch_size, randomize):
"""Create operations to read the MNIST input file.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
batch_size: size of the mini batches to generate.
randomize: If true, randomize the dataset.
Returns:
images: A tensor with the formatted image data. shape [batch_size, 28*28]
labels: A tensor with the labels for each image. shape [batch_size]
"""
file_queue = tf.train.string_input_producer([mnist_data_file])
reader = tf.TFRecordReader()
_, value = reader.read(file_queue)
example = tf.parse_single_example(
value,
features={"image/encoded": tf.FixedLenFeature(shape=(), dtype=tf.string),
"image/class/label": tf.FixedLenFeature([1], tf.int64)})
image = tf.cast(tf.image.decode_png(example["image/encoded"], channels=1),
tf.float32)
image = tf.reshape(image, [IMAGE_SIZE * IMAGE_SIZE])
image /= 255
label = tf.cast(example["image/class/label"], dtype=tf.int32)
label = tf.reshape(label, [])
if randomize:
images, labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size,
capacity=(batch_size * 100),
min_after_dequeue=(batch_size * 10))
else:
images, labels = tf.train.batch([image, label], batch_size=batch_size)
return images, labels
def Eval(mnist_data_file, network_parameters, num_testing_images,
randomize, load_path, save_mistakes=False):
"""Evaluate MNIST for a number of steps.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
network_parameters: parameters for defining and training the network.
num_testing_images: the number of images we will evaluate on.
    randomize: if true, randomize the read order; otherwise, read the
      testing images sequentially.
load_path: path where to load trained parameters from.
save_mistakes: save the mistakes if True.
Returns:
    A tuple (accuracy, mistakes): the evaluation accuracy as a float, and
      the list of mistakes if save_mistakes is True, otherwise None.
"""
batch_size = 100
# Like for training, we need a session for executing the TensorFlow graph.
with tf.Graph().as_default(), tf.Session() as sess:
# Create the basic Mnist model.
images, labels = MnistInput(mnist_data_file, batch_size, randomize)
logits, _, _ = utils.BuildNetwork(images, network_parameters)
softmax = tf.nn.softmax(logits)
# Load the variables.
ckpt_state = tf.train.get_checkpoint_state(load_path)
if not (ckpt_state and ckpt_state.model_checkpoint_path):
raise ValueError("No model checkpoint to eval at %s\n" % load_path)
saver = tf.train.Saver()
saver.restore(sess, ckpt_state.model_checkpoint_path)
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
total_examples = 0
correct_predictions = 0
image_index = 0
mistakes = []
for _ in xrange((num_testing_images + batch_size - 1) // batch_size):
predictions, label_values = sess.run([softmax, labels])
# Count how many were predicted correctly.
for prediction, label_value in zip(predictions, label_values):
total_examples += 1
if np.argmax(prediction) == label_value:
correct_predictions += 1
elif save_mistakes:
mistakes.append({"index": image_index,
"label": label_value,
"pred": np.argmax(prediction)})
image_index += 1
return (correct_predictions / total_examples,
mistakes if save_mistakes else None)
def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
save_path, eval_steps=0):
"""Train MNIST for a number of steps.
Args:
mnist_train_file: path of MNIST train data file.
mnist_test_file: path of MNIST test data file.
network_parameters: parameters for defining and training the network.
num_steps: number of steps to run. Here steps = lots
save_path: path where to save trained parameters.
eval_steps: evaluate the model every eval_steps.
  Returns:
    None. Results are periodically written to a JSON log under save_path.
Raises:
ValueError: if the accountant_type is not supported.
"""
batch_size = FLAGS.batch_size
params = {"accountant_type": FLAGS.accountant_type,
"task_id": 0,
"batch_size": FLAGS.batch_size,
"projection_dimensions": FLAGS.projection_dimensions,
"default_gradient_l2norm_bound":
network_parameters.default_gradient_l2norm_bound,
"num_hidden_layers": FLAGS.num_hidden_layers,
"hidden_layer_num_units": FLAGS.hidden_layer_num_units,
"num_examples": NUM_TRAINING_IMAGES,
"learning_rate": FLAGS.lr,
"end_learning_rate": FLAGS.end_lr,
"learning_rate_saturate_epochs": FLAGS.lr_saturate_epochs
}
# Log different privacy parameters dependent on the accountant type.
if FLAGS.accountant_type == "Amortized":
params.update({"flag_eps": FLAGS.eps,
"flag_delta": FLAGS.delta,
"flag_pca_eps": FLAGS.pca_eps,
"flag_pca_delta": FLAGS.pca_delta,
})
elif FLAGS.accountant_type == "Moments":
params.update({"sigma": FLAGS.sigma,
"pca_sigma": FLAGS.pca_sigma,
})
with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'):
# Create the basic Mnist model.
images, labels = MnistInput(mnist_train_file, batch_size, FLAGS.randomize)
logits, projection, training_params = utils.BuildNetwork(
images, network_parameters)
cost = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.one_hot(labels, 10))
# The actual cost is the average across the examples.
cost = tf.reduce_sum(cost, [0]) / batch_size
if FLAGS.accountant_type == "Amortized":
priv_accountant = accountant.AmortizedAccountant(NUM_TRAINING_IMAGES)
sigma = None
pca_sigma = None
with_privacy = FLAGS.eps > 0
elif FLAGS.accountant_type == "Moments":
priv_accountant = accountant.GaussianMomentsAccountant(
NUM_TRAINING_IMAGES)
sigma = FLAGS.sigma
pca_sigma = FLAGS.pca_sigma
with_privacy = FLAGS.sigma > 0
else:
raise ValueError("Undefined accountant type, needs to be "
"Amortized or Moments, but got %s" % FLAGS.accountant)
# Note: Here and below, we scale down the l2norm_bound by
# batch_size. This is because per_example_gradients computes the
# gradient of the minibatch loss with respect to each individual
# example, and the minibatch loss (for our model) is the *average*
# loss over examples in the minibatch. Hence, the scale of the
# per-example gradients goes like 1 / batch_size.
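    # For example, if loss = (1/B) * sum_j l_j for a minibatch of size B,
    # each example j contributes (1/B) * d(l_j)/d(theta) to the minibatch
    # gradient, so a per-example clipping bound C becomes C / B here.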
gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
priv_accountant,
[network_parameters.default_gradient_l2norm_bound / batch_size, True])
for var in training_params:
if "gradient_l2norm_bound" in training_params[var]:
l2bound = training_params[var]["gradient_l2norm_bound"] / batch_size
gaussian_sanitizer.set_option(var,
sanitizer.ClipOption(l2bound, True))
lr = tf.placeholder(tf.float32)
eps = tf.placeholder(tf.float32)
delta = tf.placeholder(tf.float32)
init_ops = []
if network_parameters.projection_type == "PCA":
with tf.variable_scope("pca"):
# Compute differentially private PCA.
all_data, _ = MnistInput(mnist_train_file, NUM_TRAINING_IMAGES, False)
pca_projection = dp_pca.ComputeDPPrincipalProjection(
all_data, network_parameters.projection_dimensions,
gaussian_sanitizer, [FLAGS.pca_eps, FLAGS.pca_delta], pca_sigma)
assign_pca_proj = tf.assign(projection, pca_projection)
init_ops.append(assign_pca_proj)
# Add global_step
global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
name="global_step")
if with_privacy:
gd_op = dp_optimizer.DPGradientDescentOptimizer(
lr,
[eps, delta],
gaussian_sanitizer,
sigma=sigma,
batches_per_lot=FLAGS.batches_per_lot).minimize(
cost, global_step=global_step)
else:
gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)
saver = tf.train.Saver()
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
    # We need to maintain the initialization sequence.
for v in tf.trainable_variables():
sess.run(tf.variables_initializer([v]))
sess.run(tf.global_variables_initializer())
sess.run(init_ops)
results = []
start_time = time.time()
prev_time = start_time
filename = "results-0.json"
log_path = os.path.join(save_path, filename)
target_eps = [float(s) for s in FLAGS.target_eps.split(",")]
if FLAGS.accountant_type == "Amortized":
# Only matters if --terminate_based_on_privacy is true.
target_eps = [max(target_eps)]
max_target_eps = max(target_eps)
lot_size = FLAGS.batches_per_lot * FLAGS.batch_size
lots_per_epoch = NUM_TRAINING_IMAGES / lot_size
for step in xrange(num_steps):
epoch = step / lots_per_epoch
curr_lr = utils.VaryRate(FLAGS.lr, FLAGS.end_lr,
FLAGS.lr_saturate_epochs, epoch)
curr_eps = utils.VaryRate(FLAGS.eps, FLAGS.end_eps,
FLAGS.eps_saturate_epochs, epoch)
for _ in xrange(FLAGS.batches_per_lot):
_ = sess.run(
[gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: FLAGS.delta})
sys.stderr.write("step: %d\n" % step)
# See if we should stop training due to exceeded privacy budget:
should_terminate = False
terminate_spent_eps_delta = None
if with_privacy and FLAGS.terminate_based_on_privacy:
terminate_spent_eps_delta = priv_accountant.get_privacy_spent(
sess, target_eps=[max_target_eps])[0]
# For the Moments accountant, we should always have
# spent_eps == max_target_eps.
if (terminate_spent_eps_delta.spent_delta > FLAGS.target_delta or
terminate_spent_eps_delta.spent_eps > max_target_eps):
should_terminate = True
if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate:
if with_privacy:
spent_eps_deltas = priv_accountant.get_privacy_spent(
sess, target_eps=target_eps)
else:
spent_eps_deltas = [accountant.EpsDelta(0, 0)]
for spent_eps, spent_delta in spent_eps_deltas:
sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % (
spent_eps, spent_delta))
saver.save(sess, save_path=save_path + "/ckpt")
train_accuracy, _ = Eval(mnist_train_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=True, load_path=save_path)
sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy)
test_accuracy, mistakes = Eval(mnist_test_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=False, load_path=save_path,
save_mistakes=FLAGS.save_mistakes)
sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy)
curr_time = time.time()
elapsed_time = curr_time - prev_time
prev_time = curr_time
results.append({"step": step+1, # Number of lots trained so far.
"elapsed_secs": elapsed_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"mistakes": mistakes})
loginfo = {"elapsed_secs": curr_time-start_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"num_training_steps": step+1, # Steps so far.
"mistakes": mistakes,
"result_series": results}
loginfo.update(params)
if log_path:
with tf.gfile.Open(log_path, "w") as f:
json.dump(loginfo, f, indent=2)
f.write("\n")
f.close()
if should_terminate:
break
def main(_):
network_parameters = utils.NetworkParameters()
# If the ASCII proto isn't specified, then construct a config protobuf based
# on 3 flags.
network_parameters.input_size = IMAGE_SIZE ** 2
network_parameters.default_gradient_l2norm_bound = (
FLAGS.default_gradient_l2norm_bound)
if FLAGS.projection_dimensions > 0 and FLAGS.num_conv_layers > 0:
raise ValueError("Currently you can't do PCA and have convolutions"
"at the same time. Pick one")
# could add support for PCA after convolutions.
# Currently BuildNetwork can build the network with conv followed by
# projection, but the PCA training works on data, rather than data run
# through a few layers. Will need to init the convs before running the
# PCA, and need to change the PCA subroutine to take a network and perhaps
# allow for batched inputs, to handle larger datasets.
if FLAGS.num_conv_layers > 0:
conv = utils.ConvParameters()
conv.name = "conv1"
conv.in_channels = 1
conv.out_channels = 128
conv.num_outputs = 128 * 14 * 14
network_parameters.conv_parameters.append(conv)
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# insize 28x28, bias, stddev 0.1, non-trainable.
if FLAGS.num_conv_layers > 1:
      conv = utils.ConvParameters()
conv.name = "conv2"
conv.in_channels = 128
conv.out_channels = 128
conv.num_outputs = 128 * 7 * 7
conv.in_size = 14
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# bias, stddev 0.1, non-trainable.
network_parameters.conv_parameters.append(conv)
if FLAGS.num_conv_layers > 2:
raise ValueError("Currently --num_conv_layers must be 0,1 or 2."
"Manually create a network_parameters proto for more.")
if FLAGS.projection_dimensions > 0:
network_parameters.projection_type = "PCA"
network_parameters.projection_dimensions = FLAGS.projection_dimensions
for i in xrange(FLAGS.num_hidden_layers):
hidden = utils.LayerParameters()
hidden.name = "hidden%d" % i
hidden.num_units = FLAGS.hidden_layer_num_units
hidden.relu = True
hidden.with_bias = False
hidden.trainable = not FLAGS.freeze_bottom_layers
network_parameters.layer_parameters.append(hidden)
logits = utils.LayerParameters()
logits.name = "logits"
logits.num_units = 10
logits.relu = False
logits.with_bias = False
network_parameters.layer_parameters.append(logits)
Train(FLAGS.training_data_path,
FLAGS.eval_data_path,
network_parameters,
FLAGS.num_training_steps,
FLAGS.save_path,
eval_steps=FLAGS.eval_steps)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
eranchetz/nupic | nupic/math/roc_utils.py | 49 | 8308 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions to compute ROC (Receiver Operator Characteristic) curves
and AUC (Area Under the Curve).
The ROCCurve() and AreaUnderCurve() functions are based on the roc_curve()
and auc() functions found in metrics.py module of scikit-learn
(http://scikit-learn.org/stable/). Scikit-learn has a BSD license (3 clause).
Following is the original license/credits statement from the top of the
metrics.py file:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD Style.
"""
import numpy as np
def ROCCurve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on y_score used to compute fpr and tpr
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = np.ravel(y_true)
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = np.ravel(y_score)
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
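  # Sweep the candidate thresholds from the highest score downwards;
  # samples sharing a score are grouped so that tpr/fpr are recorded
  # once per distinct threshold value.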
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
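  # Trapezoidal rule: each interval of width h[i] contributes
  # h[i] * (y[i] + y[i+1]) / 2 to the total area.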
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
def _printNPArray(x, precision=2):
format = "%%.%df" % (precision)
for elem in x:
print format % (elem),
print
def _test():
"""
This is a toy example, to show the basic functionality:
The dataset is:
actual prediction
-------------------------
0 0.1
0 0.4
1 0.5
1 0.3
1 0.45
Some ROC terminology:
A True Positive (TP) is when we predict TRUE and the actual value is 1.
A False Positive (FP) is when we predict TRUE, but the actual value is 0.
The True Positive Rate (TPR) is TP/P, where P is the total number of actual
positives (3 in this example, the last 3 samples).
The False Positive Rate (FPR) is FP/N, where N is the total number of actual
negatives (2 in this example, the first 2 samples)
Here are the classifications at various choices for the threshold. The
prediction is TRUE if the predicted value is >= threshold and FALSE otherwise.
actual pred 0.50 0.45 0.40 0.30 0.10
---------------------------------------------------------
0 0.1 0 0 0 0 1
0 0.4 0 0 1 1 1
1 0.5 1 1 1 1 1
1 0.3 0 0 0 1 1
1 0.45 0 1 1 1 1
TruePos(TP) 1 2 2 3 3
FalsePos(FP) 0 0 1 1 2
TruePosRate(TPR) 1/3 2/3 2/3 3/3 3/3
FalsePosRate(FPR) 0/2 0/2 1/2 1/2 2/2
The ROC curve is a plot of FPR on the x-axis and TPR on the y-axis. Basically,
one can pick any operating point along this curve to run, the operating point
determined by which threshold you want to use. By changing the threshold, you
tradeoff TP's for FPs.
The more area under this curve, the better the classification algorithm is.
The AreaUnderCurve() function can be used to compute the area under this
curve.
"""
yTrue = np.array([0, 0, 1, 1, 1])
yScore = np.array([0.1, 0.4, 0.5, 0.3, 0.45])
(fpr, tpr, thresholds) = ROCCurve(yTrue, yScore)
print "Actual: ",
_printNPArray(yTrue)
print "Predicted: ",
_printNPArray(yScore)
print
print "Thresholds:",
_printNPArray(thresholds[::-1])
print "FPR(x): ",
_printNPArray(fpr)
print "TPR(y): ",
_printNPArray(tpr)
print
area = AreaUnderCurve(fpr, tpr)
print "AUC: ", area
if __name__=='__main__':
_test()
| agpl-3.0 |
stefrobb/namebench | nb_third_party/dns/rrset.py | 215 | 5866 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS RRsets (an RRset is a named rdataset)"""
import dns.name
import dns.rdata
import dns.rdataset
import dns.rdataclass
import dns.rdatatype
import dns.renderer
class RRset(dns.rdataset.Rdataset):
"""A DNS RRset (named rdataset).
RRset inherits from Rdataset, and RRsets can be treated as
Rdatasets in most cases. There are, however, a few notable
exceptions. RRsets have different to_wire() and to_text() method
arguments, reflecting the fact that RRsets always have an owner
name.
"""
__slots__ = ['name', 'deleting']
def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
deleting=None):
"""Create a new RRset."""
super(RRset, self).__init__(rdclass, rdtype)
self.name = name
self.deleting = deleting
def _clone(self):
obj = super(RRset, self)._clone()
obj.name = self.name
obj.deleting = self.deleting
return obj
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if self.deleting is not None:
dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
else:
dtext = ''
return '<DNS ' + str(self.name) + ' ' + \
dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two RRsets are equal if they have the same name and the same
rdataset
@rtype: bool"""
if not isinstance(other, RRset):
return False
if self.name != other.name:
return False
return super(RRset, self).__eq__(other)
def match(self, name, rdclass, rdtype, covers, deleting=None):
"""Returns True if this rrset matches the specified class, type,
covers, and deletion state."""
if not super(RRset, self).match(rdclass, rdtype, covers):
return False
if self.name != name or self.deleting != deleting:
return False
return True
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the RRset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
@param relativize: True if names should names be relativized
@type relativize: bool"""
return super(RRset, self).to_text(self.name, origin, relativize,
self.deleting, **kw)
def to_wire(self, file, compress=None, origin=None, **kw):
"""Convert the RRset to wire format."""
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw)
def to_rdataset(self):
"""Convert an RRset into an Rdataset.
@rtype: dns.rdataset.Rdataset object
"""
return dns.rdataset.from_rdata_list(self.ttl, list(self))
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type, and with
the specified list of rdatas in text format.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
r = RRset(name, rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type and with
the specified rdatas in text format.
@rtype: dns.rrset.RRset object
"""
return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
def from_rdata_list(name, ttl, rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified list of rdata objects.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = RRset(name, rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(name, ttl, *rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified rdata objects.
@rtype: dns.rrset.RRset object
"""
return from_rdata_list(name, ttl, rdatas)
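def _usage_sketch():
    # Illustrative sketch only, not part of the dnspython API: build an
    # RRset from text rdatas and render it back to master-file format.
    # The owner name and addresses are made-up example values.
    rrs = from_text('www.example.', 300, 'IN', 'A', '10.0.0.1', '10.0.0.2')
    assert rrs.name == dns.name.from_text('www.example.')
    assert len(rrs) == 2
    return rrs.to_text()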
| apache-2.0 |
PatrickOReilly/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 164 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
ray-project/ray | doc/source/ray-overview/doc_test/ray_train.py | 1 | 1118 | import torch
import ray.train as train
from ray.train.torch import TorchTrainer, TorchCheckpoint
from ray.air import ScalingConfig, session
def train_func():
# Setup model.
model = torch.nn.Linear(1, 1)
model = train.torch.prepare_model(model)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
# Setup data.
input = torch.randn(1000, 1)
labels = input * 2
dataset = torch.utils.data.TensorDataset(input, labels)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
dataloader = train.torch.prepare_data_loader(dataloader)
# Train.
for _ in range(5):
for X, y in dataloader:
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
session.report({"loss": loss.item()})
session.report({}, checkpoint=TorchCheckpoint.from_model(model))
trainer = TorchTrainer(train_func, scaling_config=ScalingConfig(num_workers=4))
results = trainer.fit()
print(results.metrics)
print(results.checkpoint)
| apache-2.0 |
ray-project/ray | python/ray/util/collective/tests/distributed_multigpu_tests/test_distributed_multigpu_allreduce.py | 1 | 6660 | """Test the collective allreduce API on a distributed Ray cluster."""
import pytest
import logging
import cupy as cp
import ray
from ray.util.collective.types import ReduceOp
from ray.util.collective.tests.util import create_collective_multigpu_workers
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
@pytest.mark.parametrize("group_name", ["default", "test", "123?34!"])
def test_allreduce_multigpu_different_name(
ray_start_distributed_multigpu_2_nodes_4_gpus, group_name
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(
num_workers=world_size, group_name=group_name
)
results = ray.get([a.do_allreduce_multigpu.remote(group_name) for a in actors])
assert (results[0] == cp.ones((10,), dtype=cp.float32) * actual_world_size).all()
assert (results[1] == cp.ones((10,), dtype=cp.float32) * actual_world_size).all()
@pytest.mark.parametrize("array_size", [2, 2**5, 2**10, 2**15, 2**20])
def test_allreduce_multigpu_different_array_size(
ray_start_distributed_multigpu_2_nodes_4_gpus, array_size
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
ray.get([a.set_buffer.remote(array_size) for a in actors])
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (
results[0] == cp.ones((array_size,), dtype=cp.float32) * actual_world_size
).all()
assert (
results[1] == cp.ones((array_size,), dtype=cp.float32) * actual_world_size
).all()
def test_allreduce_multigpu_destroy(
ray_start_distributed_multigpu_2_nodes_4_gpus, backend="nccl", group_name="default"
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (results[0] == cp.ones((10,), dtype=cp.float32) * actual_world_size).all()
assert (results[1] == cp.ones((10,), dtype=cp.float32) * actual_world_size).all()
# destroy the group and try do work, should fail
ray.get([a.destroy_group.remote() for a in actors])
with pytest.raises(RuntimeError):
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
# reinit the same group and all reduce
ray.get(
[
actor.init_group.remote(world_size, i, backend, group_name)
for i, actor in enumerate(actors)
]
)
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (
results[0]
== cp.ones((10,), dtype=cp.float32) * actual_world_size * actual_world_size
).all()
assert (
results[1]
== cp.ones((10,), dtype=cp.float32) * actual_world_size * actual_world_size
).all()
def test_allreduce_multigpu_multiple_group(
ray_start_distributed_multigpu_2_nodes_4_gpus, backend="nccl", num_groups=5
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
for group_name in range(1, num_groups):
ray.get(
[
actor.init_group.remote(world_size, i, backend, str(group_name))
for i, actor in enumerate(actors)
]
)
for i in range(num_groups):
group_name = "default" if i == 0 else str(i)
results = ray.get([a.do_allreduce_multigpu.remote(group_name) for a in actors])
assert (
results[0]
== cp.ones((10,), dtype=cp.float32) * (actual_world_size ** (i + 1))
).all()
def test_allreduce_multigpu_different_op(ray_start_distributed_multigpu_2_nodes_4_gpus):
world_size = 2
actors, _ = create_collective_multigpu_workers(world_size)
# check product
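    # Two actors with two GPU buffers each (value0/value1) participate, so
    # the product-allreduce over the four ranks yields 2 * 3 * 4 * 5 = 120.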
ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5))
results = ray.get(
[a.do_allreduce_multigpu.remote(op=ReduceOp.PRODUCT) for a in actors]
)
assert (results[0] == cp.ones((10,), dtype=cp.float32) * 120).all()
assert (results[1] == cp.ones((10,), dtype=cp.float32) * 120).all()
# check min
ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5))
results = ray.get([a.do_allreduce_multigpu.remote(op=ReduceOp.MIN) for a in actors])
assert (results[0] == cp.ones((10,), dtype=cp.float32) * 2).all()
assert (results[1] == cp.ones((10,), dtype=cp.float32) * 2).all()
# check max
ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3))
ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5))
results = ray.get([a.do_allreduce_multigpu.remote(op=ReduceOp.MAX) for a in actors])
assert (results[0] == cp.ones((10,), dtype=cp.float32) * 5).all()
assert (results[1] == cp.ones((10,), dtype=cp.float32) * 5).all()
@pytest.mark.parametrize("dtype", [cp.uint8, cp.float16, cp.float32, cp.float64])
def test_allreduce_multigpu_different_dtype(
ray_start_distributed_multigpu_2_nodes_4_gpus, dtype
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
ray.get([a.set_buffer.remote([10], dtype=dtype) for a in actors])
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (results[0] == cp.ones((10,), dtype=dtype) * actual_world_size).all()
assert (results[1] == cp.ones((10,), dtype=dtype) * actual_world_size).all()
def test_allreduce_torch_cupy(ray_start_distributed_multigpu_2_nodes_4_gpus):
# import torch
world_size = 2
actual_world_size = 4
actors, _ = create_collective_multigpu_workers(world_size)
ray.get(actors[0].set_buffer.remote([10]))
ray.get(
actors[1].set_buffer.remote([10], tensor_type0="torch", tensor_type1="torch")
)
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (results[0] == cp.ones((10,)) * actual_world_size).all()
ray.get(
actors[0].set_buffer.remote([10], tensor_type0="cupy", tensor_type1="torch")
)
ray.get(
actors[1].set_buffer.remote([10], tensor_type0="torch", tensor_type1="cupy")
)
results = ray.get([a.do_allreduce_multigpu.remote() for a in actors])
assert (results[0] == cp.ones((10,)) * actual_world_size).all()
| apache-2.0 |
ChampionZP/DeepLearningImplementations | DenseNet/run_cifar10.py | 1 | 7187 | from __future__ import print_function
import os
import time
import json
import argparse
import densenet
import numpy as np
import keras.backend as K
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils
def run_cifar10(batch_size,
nb_epoch,
depth,
nb_dense_block,
nb_filter,
growth_rate,
dropout_rate,
learning_rate,
weight_decay,
plot_architecture):
""" Run CIFAR10 experiments
:param batch_size: int -- batch size
:param nb_epoch: int -- number of training epochs
:param depth: int -- network depth
:param nb_dense_block: int -- number of dense blocks
:param nb_filter: int -- initial number of conv filter
:param growth_rate: int -- number of new filters added by conv layers
:param dropout_rate: float -- dropout rate
:param learning_rate: float -- learning rate
:param weight_decay: float -- weight decay
:param plot_architecture: bool -- whether to plot network architecture
"""
###################
# Data processing #
###################
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
nb_classes = len(np.unique(y_train))
img_dim = X_train.shape[1:]
if K.image_dim_ordering() == "th":
n_channels = X_train.shape[1]
else:
n_channels = X_train.shape[-1]
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Normalisation
X = np.vstack((X_train, X_test))
# 2 cases depending on the image ordering
if K.image_dim_ordering() == "th":
for i in range(n_channels):
mean = np.mean(X[:, i, :, :])
std = np.std(X[:, i, :, :])
X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std
elif K.image_dim_ordering() == "tf":
for i in range(n_channels):
mean = np.mean(X[:, :, :, i])
std = np.std(X[:, :, :, i])
X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std
###################
# Construct model #
###################
model = densenet.DenseNet(nb_classes,
img_dim,
depth,
nb_dense_block,
growth_rate,
nb_filter,
dropout_rate=dropout_rate,
weight_decay=weight_decay)
# Model output
model.summary()
# Build optimizer
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=["accuracy"])
if plot_architecture:
from keras.utils.visualize_util import plot
plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)
####################
# Network training #
####################
print("Training")
list_train_loss = []
list_test_loss = []
list_learning_rate = []
for e in range(nb_epoch):
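        # Learning-rate schedule: divide by 10 halfway through training and
        # by 100 at the three-quarter mark, as in the DenseNet paper.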
if e == int(0.5 * nb_epoch):
K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))
if e == int(0.75 * nb_epoch):
K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))
split_size = batch_size
        num_splits = X_train.shape[0] // split_size  # integer division for np.array_split
arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)
l_train_loss = []
start = time.time()
for batch_idx in arr_splits:
X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)
l_train_loss.append([train_logloss, train_acc])
test_logloss, test_acc = model.evaluate(X_test,
Y_test,
verbose=0,
batch_size=64)
list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
list_test_loss.append([test_logloss, test_acc])
list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
# to convert numpy array to json serializable
print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))
d_log = {}
d_log["batch_size"] = batch_size
d_log["nb_epoch"] = nb_epoch
d_log["optimizer"] = opt.get_config()
d_log["train_loss"] = list_train_loss
d_log["test_loss"] = list_test_loss
d_log["learning_rate"] = list_learning_rate
json_file = os.path.join('./log/experiment_log_cifar10.json')
with open(json_file, 'w') as fp:
json.dump(d_log, fp, indent=4, sort_keys=True)
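# Illustrative command line (flag names match the argparse options below;
# the values are only an example, defaults are used when flags are omitted):
#
#   python run_cifar10.py --batch_size 64 --nb_epoch 30 --depth 40 \
#       --nb_dense_block 3 --growth_rate 12 --learning_rate 1E-3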
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run CIFAR10 experiment')
parser.add_argument('--batch_size', default=64, type=int,
help='Batch size')
parser.add_argument('--nb_epoch', default=30, type=int,
help='Number of epochs')
parser.add_argument('--depth', type=int, default=7,
help='Network depth')
parser.add_argument('--nb_dense_block', type=int, default=1,
help='Number of dense blocks')
parser.add_argument('--nb_filter', type=int, default=16,
help='Initial number of conv filters')
parser.add_argument('--growth_rate', type=int, default=12,
help='Number of new filters added by conv layers')
parser.add_argument('--dropout_rate', type=float, default=0.2,
help='Dropout rate')
parser.add_argument('--learning_rate', type=float, default=1E-3,
help='Learning rate')
parser.add_argument('--weight_decay', type=float, default=1E-4,
help='L2 regularization on weights')
parser.add_argument('--plot_architecture', type=bool, default=False,
help='Save a plot of the network architecture')
args = parser.parse_args()
print("Network configuration:")
    for name, value in args._get_kwargs():
print(name, value)
list_dir = ["./log", "./figures"]
for d in list_dir:
if not os.path.exists(d):
os.makedirs(d)
run_cifar10(args.batch_size,
args.nb_epoch,
args.depth,
args.nb_dense_block,
args.nb_filter,
args.growth_rate,
args.dropout_rate,
args.learning_rate,
args.weight_decay,
args.plot_architecture)
| mit |
ray-project/ray | rllib/models/torch/modules/noisy_layer.py | 1 | 3406 | import numpy as np
from ray.rllib.models.utils import get_activation_fn
from ray.rllib.utils.framework import try_import_torch, TensorType
torch, nn = try_import_torch()
class NoisyLayer(nn.Module):
r"""A Layer that adds learnable Noise to some previous layer's outputs.
Consists of:
- a common dense layer: y = w^{T}x + b
- a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
(b+\epsilon_b*\sigma_b)
, where \epsilon are random variables sampled from factorized normal
distributions and \sigma are trainable variables which are expected to
vanish along the training procedure.
"""
def __init__(
self, in_size: int, out_size: int, sigma0: float, activation: str = "relu"
):
"""Initializes a NoisyLayer object.
Args:
in_size: Input size for Noisy Layer
out_size: Output size for Noisy Layer
sigma0: Initialization value for sigma_b (bias noise)
activation: Non-linear activation for Noisy Layer
"""
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.sigma0 = sigma0
self.activation = get_activation_fn(activation, framework="torch")
if self.activation is not None:
self.activation = self.activation()
sigma_w = nn.Parameter(
torch.from_numpy(
np.random.uniform(
low=-1.0 / np.sqrt(float(self.in_size)),
high=1.0 / np.sqrt(float(self.in_size)),
size=[self.in_size, out_size],
)
).float()
)
self.register_parameter("sigma_w", sigma_w)
sigma_b = nn.Parameter(
torch.from_numpy(
np.full(
shape=[out_size], fill_value=sigma0 / np.sqrt(float(self.in_size))
)
).float()
)
self.register_parameter("sigma_b", sigma_b)
w = nn.Parameter(
torch.from_numpy(
np.full(
shape=[self.in_size, self.out_size],
fill_value=6 / np.sqrt(float(in_size) + float(out_size)),
)
).float()
)
self.register_parameter("w", w)
b = nn.Parameter(torch.from_numpy(np.zeros([out_size])).float())
self.register_parameter("b", b)
def forward(self, inputs: TensorType) -> TensorType:
epsilon_in = self._f_epsilon(
torch.normal(
mean=torch.zeros([self.in_size]), std=torch.ones([self.in_size])
).to(inputs.device)
)
epsilon_out = self._f_epsilon(
torch.normal(
mean=torch.zeros([self.out_size]), std=torch.ones([self.out_size])
).to(inputs.device)
)
epsilon_w = torch.matmul(
torch.unsqueeze(epsilon_in, -1), other=torch.unsqueeze(epsilon_out, 0)
)
epsilon_b = epsilon_out
action_activation = (
torch.matmul(inputs, self.w + self.sigma_w * epsilon_w)
+ self.b
+ self.sigma_b * epsilon_b
)
if self.activation is not None:
action_activation = self.activation(action_activation)
return action_activation
def _f_epsilon(self, x: TensorType) -> TensorType:
return torch.sign(x) * torch.pow(torch.abs(x), 0.5)
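# Usage sketch (illustrative, assuming torch is importable):
#
#   layer = NoisyLayer(in_size=4, out_size=2, sigma0=0.5)
#   out = layer(torch.ones(3, 4))   # -> shape (3, 2)
#
# Epsilon is resampled on every forward pass via the factorized transform
# f(x) = sign(x) * sqrt(|x|), so repeated calls on the same input generally
# differ; the learnable sigmas can shrink during training to anneal the noise.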
| apache-2.0 |
cainiaocome/scikit-learn | sklearn/lda.py | 55 | 17706 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
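# Illustrative calling patterns for _cov (outputs depend on the data, so only
# the shrinkage modes are shown):
#
#   X = np.random.randn(20, 3)
#   _cov(X)           # empirical covariance
#   _cov(X, 'auto')   # Ledoit-Wolf shrinkage
#   _cov(X, 0.1)      # fixed shrinkage towards the scaled identity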
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
        Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
        Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
| bsd-3-clause |
AllenDowney/ThinkBayes2 | scripts/species.py | 1 | 52932 | """This file contains code used in "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes2
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
"""Encapsulates a shelf for storing key-value pairs."""
def __init__(self, shelf_file):
self.shelf = shelve.open(shelf_file)
def Close(self):
"""Closes the shelf.
"""
self.shelf.close()
def Add(self, key, value):
"""Adds a key-value pair."""
self.shelf[str(key)] = value
def Lookup(self, key):
"""Looks up a key."""
return self.shelf.get(str(key))
def Keys(self):
"""Returns an iterator of keys."""
        return iter(self.shelf.keys())  # shelve has no iterkeys() on Python 3
def Read(self):
"""Returns the contents of the shelf as a map."""
return dict(self.shelf)
class Subject(object):
"""Represents a subject from the belly button study."""
def __init__(self, code):
"""
code: string ID
species: sequence of (int count, string species) pairs
"""
self.code = code
self.species = []
self.suite = None
self.num_reads = None
self.num_species = None
self.total_reads = None
self.total_species = None
self.prev_unseen = None
self.pmf_n = None
self.pmf_q = None
self.pmf_l = None
def Add(self, species, count):
"""Add a species-count pair.
It is up to the caller to ensure that species names are unique.
species: string species/genus name
count: int number of individuals
"""
self.species.append((count, species))
def Done(self, reverse=False, clean_param=0):
"""Called when we are done adding species counts.
reverse: which order to sort in
"""
if clean_param:
self.Clean(clean_param)
self.species.sort(reverse=reverse)
counts = self.GetCounts()
self.num_species = len(counts)
self.num_reads = sum(counts)
def Clean(self, clean_param=50):
"""Identifies and removes bogus data.
clean_param: parameter that controls the number of legit species
"""
def prob_bogus(k, r):
"""Compute the probability that a species is bogus."""
q = clean_param / r
p = (1-q) ** k
return p
print(self.code, clean_param)
counts = self.GetCounts()
r = 1.0 * sum(counts)
species_seq = []
for k, species in sorted(self.species):
if random.random() < prob_bogus(k, r):
continue
species_seq.append((k, species))
self.species = species_seq
def GetM(self):
"""Gets number of observed species."""
return len(self.species)
def GetCounts(self):
"""Gets the list of species counts
        Should be in increasing order, if Done() has been invoked.
"""
return [count for count, _ in self.species]
def MakeCdf(self):
"""Makes a CDF of total prevalence vs rank."""
counts = self.GetCounts()
counts.sort(reverse=True)
cdf = thinkbayes2.Cdf(dict(enumerate(counts)))
return cdf
def GetNames(self):
"""Gets the names of the seen species."""
return [name for _, name in self.species]
def PrintCounts(self):
"""Prints the counts and species names."""
for count, name in reversed(self.species):
print(count, name)
def GetSpecies(self, index):
"""Gets the count and name of the indicated species.
Returns: count-species pair
"""
return self.species[index]
def GetCdf(self):
"""Returns cumulative prevalence vs number of species.
"""
counts = self.GetCounts()
items = enumerate(counts)
cdf = thinkbayes2.Cdf(items)
return cdf
def GetPrevalences(self):
"""Returns a sequence of prevalences (normalized counts).
"""
counts = self.GetCounts()
total = sum(counts)
prevalences = numpy.array(counts, dtype=numpy.float) / total
return prevalences
def Process(self, low=None, high=500, conc=1, iters=100):
"""Computes the posterior distribution of n and the prevalences.
Sets attribute: self.suite
low: minimum number of species
high: maximum number of species
conc: concentration parameter
iters: number of iterations to use in the estimator
"""
counts = self.GetCounts()
m = len(counts)
if low is None:
low = max(m, 2)
ns = range(low, high+1)
#start = time.time()
self.suite = Species5(ns, conc=conc, iters=iters)
self.suite.Update(counts)
        #end = time.time()
        #print('Processing time', end - start)
def MakePrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attributes
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
curves = self.RunSimulations(num_sims, add_reads)
self.pmf_l = self.MakePredictive(curves)
def MakeQuickPrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attribute:
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
pmf = thinkbayes2.Pmf()
_, seen = self.GetSeenSpecies()
for _ in range(num_sims):
_, observations = self.GenerateObservations(add_reads)
all_seen = seen.union(observations)
l = len(all_seen) - len(seen)
pmf.Incr(l)
pmf.Normalize()
self.pmf_l = pmf
def DistL(self):
"""Returns the distribution of additional species, l.
"""
return self.pmf_l
def MakeFigures(self):
"""Makes figures showing distribution of n and the prevalences."""
self.PlotDistN()
self.PlotPrevalences()
def PlotDistN(self):
"""Plots distribution of n."""
pmf = self.suite.DistN()
print('90% CI for N:', pmf.CredibleInterval(90))
pmf.label = self.code
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pmf(pmf)
root = 'species-ndist-%s' % self.code
thinkplot.Save(root=root,
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def PlotPrevalences(self, num=5):
"""Plots dist of prevalence for several species.
num: how many species (starting with the highest prevalence)
"""
thinkplot.Clf()
thinkplot.PrePlot(num=5)
for rank in range(1, num+1):
self.PlotPrevalence(rank)
root = 'species-prev-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 1],
)
def PlotPrevalence(self, rank=1, cdf_flag=True):
"""Plots dist of prevalence for one species.
rank: rank order of the species to plot.
cdf_flag: whether to plot the CDF
"""
# convert rank to index
index = self.GetM() - rank
_, mix = self.suite.DistOfPrevalence(index)
count, _ = self.GetSpecies(index)
mix.label = '%d (%d)' % (rank, count)
print('90%% CI for prevalence of species %d:' % rank, end=' ')
print(mix.CredibleInterval(90))
if cdf_flag:
cdf = mix.MakeCdf()
thinkplot.Cdf(cdf)
else:
thinkplot.Pmf(mix)
def PlotMixture(self, rank=1):
"""Plots dist of prevalence for all n, and the mix.
rank: rank order of the species to plot
"""
# convert rank to index
index = self.GetM() - rank
print(self.GetSpecies(index))
print(self.GetCounts()[index])
metapmf, mix = self.suite.DistOfPrevalence(index)
thinkplot.Clf()
for pmf in metapmf.Values():
thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)
thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)
root = 'species-mix-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 0.3],
legend=False)
def GetSeenSpecies(self):
"""Makes a set of the names of seen species.
Returns: number of species, set of string species names
"""
names = self.GetNames()
m = len(names)
seen = set(SpeciesGenerator(names, m))
return m, seen
def GenerateObservations(self, num_reads):
"""Generates a series of random observations.
num_reads: number of reads to generate
Returns: number of species, sequence of string species names
"""
n, prevalences = self.suite.SamplePosterior()
names = self.GetNames()
name_iter = SpeciesGenerator(names, n)
items = zip(name_iter, prevalences)
cdf = thinkbayes2.Cdf(dict(items))
observations = cdf.Sample(num_reads)
#for ob in observations:
# print ob
return n, observations
def Resample(self, num_reads):
"""Choose a random subset of the data (without replacement).
num_reads: number of reads in the subset
"""
t = []
for count, species in self.species:
t.extend([species]*count)
random.shuffle(t)
reads = t[:num_reads]
subject = Subject(self.code)
hist = thinkbayes2.Hist(reads)
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def Match(self, match):
"""Match up a rarefied subject with a complete subject.
match: complete Subject
Assigns attributes:
total_reads:
total_species:
prev_unseen:
"""
self.total_reads = match.num_reads
self.total_species = match.num_species
# compute the prevalence of unseen species (at least approximately,
# based on all species counts in match
_, seen = self.GetSeenSpecies()
seen_total = 0.0
unseen_total = 0.0
for count, species in match.species:
if species in seen:
seen_total += count
else:
unseen_total += count
self.prev_unseen = unseen_total / (seen_total + unseen_total)
def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
"""Simulates additional observations and returns a rarefaction curve.
k is the number of additional observations
num_new is the number of new species seen
num_reads: how many new reads to simulate
frac_flag: whether to convert to fraction of species seen
jitter: size of jitter added if frac_flag is true
Returns: list of (k, num_new) pairs
"""
m, seen = self.GetSeenSpecies()
n, observations = self.GenerateObservations(num_reads)
curve = []
for i, obs in enumerate(observations):
seen.add(obs)
if frac_flag:
frac_seen = len(seen) / float(n)
frac_seen += random.uniform(-jitter, jitter)
curve.append((i+1, frac_seen))
else:
num_new = len(seen) - m
curve.append((i+1, num_new))
return curve
def RunSimulations(self, num_sims, num_reads, frac_flag=False):
"""Runs simulations and returns a list of curves.
Each curve is a sequence of (k, num_new) pairs.
num_sims: how many simulations to run
num_reads: how many samples to generate in each simulation
frac_flag: whether to convert num_new to fraction of total
"""
curves = [self.RunSimulation(num_reads, frac_flag)
for _ in range(num_sims)]
return curves
def MakePredictive(self, curves):
"""Makes a predictive distribution of additional species.
curves: list of (k, num_new) curves
Returns: Pmf of num_new
"""
pred = thinkbayes2.Pmf(label=self.code)
for curve in curves:
_, last_num_new = curve[-1]
pred.Incr(last_num_new)
pred.Normalize()
return pred
def MakeConditionals(curves, ks):
"""Makes Cdfs of the distribution of num_new conditioned on k.
curves: list of (k, num_new) curves
ks: list of values of k
Returns: list of Cdfs
"""
joint = MakeJointPredictive(curves)
cdfs = []
for k in ks:
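        # Conditional(1, 0, k) extracts the distribution of variable 1
        # (num_new) given that variable 0 (the number of samples) equals k.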
pmf = joint.Conditional(1, 0, k)
pmf.label = 'k=%d' % k
cdf = pmf.MakeCdf()
cdfs.append(cdf)
print('90%% credible interval for %d' % k, end=' ')
print(cdf.CredibleInterval(90))
return cdfs
def MakeJointPredictive(curves):
"""Makes a joint distribution of k and num_new.
curves: list of (k, num_new) curves
Returns: joint Pmf of (k, num_new)
"""
joint = thinkbayes2.Joint()
for curve in curves:
for k, num_new in curve:
joint.Incr((k, num_new))
joint.Normalize()
return joint
def MakeFracCdfs(curves, ks):
"""Makes Cdfs of the fraction of species seen.
curves: list of (k, num_new) curves
Returns: list of Cdfs
"""
d = {}
for curve in curves:
for k, frac in curve:
if k in ks:
d.setdefault(k, []).append(frac)
cdfs = {}
for k, fracs in d.items():
cdf = thinkbayes2.Cdf(fracs)
cdfs[k] = cdf
return cdfs
def SpeciesGenerator(names, num):
"""Generates a series of names, starting with the given names.
Additional names are 'unseen' plus a serial number.
names: list of strings
num: total number of species names to generate
Returns: string iterator
"""
i = 0
for name in names:
yield name
i += 1
while i < num:
yield 'unseen-%d' % i
i += 1
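# Example (illustrative): list(SpeciesGenerator(['a', 'b'], 4)) yields
# ['a', 'b', 'unseen-2', 'unseen-3'].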
def ReadRarefactedData(filename='journal.pone.0047712.s001.csv',
clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from http://www.plosone.org/article/
info%3Adoi%2F10.1371%2Fjournal.pone.0047712#s4
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
#_ = reader.next()
_ = next(reader)
subject = Subject('')
subject_map = {}
i = 0
for t in reader:
code = t[0]
if code != subject.code:
# start a new subject
subject = Subject(code)
subject_map[code] = subject
# append a number to the species names so they're unique
species = t[1]
species = '%s-%d' % (species, i)
i += 1
count = int(t[2])
subject.Add(species, count)
for code, subject in subject_map.items():
subject.Done(clean_param=clean_param)
return subject_map
def ReadCompleteDataset(filename='BBB_data_from_Rob.csv', clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from personal correspondence with Rob Dunn, received 2-7-13.
Converted from xlsx to csv.
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
header = next(reader)
header = next(reader)
subject_codes = header[1:-1]
subject_codes = ['B'+code for code in subject_codes]
# create the subject map
uber_subject = Subject('uber')
subject_map = {}
for code in subject_codes:
subject_map[code] = Subject(code)
# read lines
i = 0
for t in reader:
otu_code = t[0]
if otu_code == '':
continue
# pull out a species name and give it a number
otu_names = t[-1]
taxons = otu_names.split(';')
species = taxons[-1]
species = '%s-%d' % (species, i)
i += 1
counts = [int(x) for x in t[1:-1]]
# print otu_code, species
for code, count in zip(subject_codes, counts):
if count > 0:
subject_map[code].Add(species, count)
uber_subject.Add(species, count)
uber_subject.Done(clean_param=clean_param)
for code, subject in subject_map.items():
subject.Done(clean_param=clean_param)
return subject_map, uber_subject
def JoinSubjects():
"""Reads both datasets and computes their inner join.
Finds all subjects that appear in both datasets.
For subjects in the rarefacted dataset, looks up the total
number of reads and stores it as total_reads. num_reads
is normally 400.
Returns: map from code to Subject
"""
# read the rarefacted dataset
sampled_subjects = ReadRarefactedData()
# read the complete dataset
all_subjects, _ = ReadCompleteDataset()
for code, subject in sampled_subjects.items():
if code in all_subjects:
match = all_subjects[code]
subject.Match(match)
return sampled_subjects
def JitterCurve(curve, dx=0.2, dy=0.3):
"""Adds random noise to the pairs in a curve.
dx and dy control the amplitude of the noise in each dimension.
"""
curve = [(x+random.uniform(-dx, dx),
y+random.uniform(-dy, dy)) for x, y in curve]
return curve
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
"""Adds random noise to the pairs in a curve.
i is the index of the curve
n is the number of curves
dx and dy control the amplitude of the noise in each dimension.
"""
xoff = -dx + 2 * dx * i / (n-1)
yoff = -dy + 2 * dy * i / (n-1)
curve = [(x+xoff, y+yoff) for x, y in curve]
return curve
def PlotCurves(curves, root='species-rare'):
"""Plots a set of curves.
curves is a list of curves; each curve is a list of (x, y) pairs.
"""
thinkplot.Clf()
color = '#225EA8'
n = len(curves)
for i, curve in enumerate(curves):
curve = OffsetCurve(curve, i, n)
xs, ys = zip(*curve)
thinkplot.Plot(xs, ys, color=color, alpha=0.3, linewidth=0.5)
thinkplot.Save(root=root,
xlabel='# samples',
ylabel='# species',
formats=FORMATS,
legend=False)
def PlotConditionals(cdfs, root='species-cond'):
"""Plots cdfs of num_new conditioned on k.
cdfs: list of Cdf
root: string filename root
"""
thinkplot.Clf()
thinkplot.PrePlot(num=len(cdfs))
thinkplot.Cdfs(cdfs)
thinkplot.Save(root=root,
xlabel='# new species',
ylabel='Prob',
formats=FORMATS)
def PlotFracCdfs(cdfs, root='species-frac'):
"""Plots CDFs of the fraction of species seen.
cdfs: map from k to CDF of fraction of species seen after k samples
"""
thinkplot.Clf()
color = '#225EA8'
for k, cdf in cdfs.items():
xs, ys = cdf.Render()
ys = [1-y for y in ys]
thinkplot.Plot(xs, ys, color=color, linewidth=1)
x = 0.9
y = 1 - cdf.Prob(x)
pyplot.text(x, y, str(k), fontsize=9, color=color,
horizontalalignment='center',
verticalalignment='center',
bbox=dict(facecolor='white', edgecolor='none'))
thinkplot.Save(root=root,
xlabel='Fraction of species seen',
ylabel='Probability',
formats=FORMATS,
legend=False)
class Species(thinkbayes2.Suite):
"""Represents hypotheses about the number of species."""
def __init__(self, ns, conc=1, iters=1000):
hypos = [thinkbayes2.Dirichlet(n, conc) for n in ns]
thinkbayes2.Suite.__init__(self, hypos)
self.iters = iters
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
# call Update in the parent class, which calls Likelihood
thinkbayes2.Suite.Update(self, data)
# update the next level of the hierarchy
for hypo in self.Values():
hypo.Update(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
# draw sample Likelihoods from the hypothetical Dirichlet dist
# and add them up
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of ways the observed species
# might have been chosen from all species
m = len(data)
like *= thinkbayes2.BinomialCoef(dirichlet.n, m)
return like
def DistN(self):
"""Computes the distribution of n."""
pmf = thinkbayes2.Pmf()
for hypo, prob in self.Items():
pmf.Set(hypo.n, prob)
return pmf
class Species2(object):
"""Represents hypotheses about the number of species.
Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def __init__(self, ns, conc=1, iters=1000):
self.ns = ns
self.conc = conc
self.probs = numpy.ones(len(ns), dtype=numpy.float)
self.params = numpy.ones(self.ns[-1], dtype=numpy.float) * conc
self.iters = iters
self.num_reads = 0
self.m = 0
def Preload(self, data):
"""Change the initial parameters to fit the data better.
Just an experiment. Doesn't work.
"""
m = len(data)
singletons = data.count(1)
num = m - singletons
print(m, singletons, num)
addend = numpy.ones(num, dtype=numpy.float) * 1
print(len(addend))
print(len(self.params[singletons:m]))
self.params[singletons:m] += addend
print('Preload', num)
def Update(self, data):
"""Updates the distribution based on data.
data: numpy array of counts
"""
self.num_reads += sum(data)
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
self.m = len(data)
#self.params[:self.m] += data * self.conc
self.params[:self.m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data for all values of n.
Draws one sample from the distribution of prevalences.
data: sequence of observed counts
Returns: numpy array of m likelihoods
"""
gammas = numpy.random.gamma(self.params)
m = len(data)
row = gammas[:m]
col = numpy.cumsum(gammas)
log_likes = []
for n in self.ns:
ps = row / col[n-1]
terms = numpy.log(ps) * data
log_like = terms.sum()
log_likes.append(log_like)
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
def DistN(self):
"""Computes the distribution of n.
Returns: new Pmf object
"""
pmf = thinkbayes2.Pmf(dict(zip(self.ns, self.probs)))
return pmf
def RandomN(self):
"""Returns a random value of n."""
return self.DistN().Random()
def DistQ(self, iters=100):
"""Computes the distribution of q based on distribution of n.
Returns: pmf of q
"""
cdf_n = self.DistN().MakeCdf()
sample_n = cdf_n.Sample(iters)
pmf = thinkbayes2.Pmf()
for n in sample_n:
q = self.RandomQ(n)
pmf.Incr(q)
pmf.Normalize()
return pmf
def RandomQ(self, n):
"""Returns a random value of q.
Based on n, self.num_reads and self.conc.
n: number of species
Returns: q
"""
# generate random prevalences
dirichlet = thinkbayes2.Dirichlet(n, conc=self.conc)
prevalences = dirichlet.Random()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(self.num_reads)
seen = set(sample)
# add up the prevalence of unseen species
q = 0
for species, prev in enumerate(prevalences):
if species not in seen:
q += prev
return q
def MarginalBeta(self, n, index):
"""Computes the conditional distribution of the indicated species.
n: conditional number of species
index: which species
Returns: Beta object representing a distribution of prevalence.
"""
alpha0 = self.params[:n].sum()
alpha = self.params[index]
return thinkbayes2.Beta(alpha, alpha0-alpha)
def DistOfPrevalence(self, index):
"""Computes the distribution of prevalence for the indicated species.
index: which species
Returns: (metapmf, mix) where metapmf is a MetaPmf and mix is a Pmf
"""
metapmf = thinkbayes2.Pmf()
for n, prob in zip(self.ns, self.probs):
beta = self.MarginalBeta(n, index)
pmf = beta.MakePmf()
metapmf.Set(pmf, prob)
mix = thinkbayes2.MakeMixture(metapmf)
return metapmf, mix
def SamplePosterior(self):
"""Draws random n and prevalences.
Returns: (n, prevalences)
"""
n = self.RandomN()
prevalences = self.SamplePrevalences(n)
#print 'Peeking at n_cheat'
#n = n_cheat
return n, prevalences
def SamplePrevalences(self, n):
"""Draws a sample of prevalences given n.
n: the number of species assumed in the conditional
Returns: numpy array of n prevalences
"""
if n == 1:
return [1.0]
q_desired = self.RandomQ(n)
q_desired = max(q_desired, 1e-6)
params = self.Unbias(n, self.m, q_desired)
gammas = numpy.random.gamma(params)
gammas /= gammas.sum()
return gammas
def Unbias(self, n, m, q_desired):
"""Adjusts the parameters to achieve desired prev_unseen (q).
n: number of species
m: seen species
q_desired: prevalence of unseen species
"""
params = self.params[:n].copy()
if n == m:
return params
x = sum(params[:m])
y = sum(params[m:])
a = x + y
#print x, y, a, x/a, y/a
g = q_desired * a / y
f = (a - g * y) / x
params[:m] *= f
params[m:] *= g
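        # Check of the algebra: the rescaled unseen mass is
        #   g*y / (f*x + g*y) = (q_desired * a) / ((a - g*y) + g*y) = q_desired,
        # since f*x = a - g*y by construction.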
return params
class Species3(Species2):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observations
"""
# sample the likelihoods and add them up
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
m = len(data)
self.params[:m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data under all hypotheses.
data: list of observations
"""
# get a random sample
gammas = numpy.random.gamma(self.params)
# row is just the first m elements of gammas
m = len(data)
row = gammas[:m]
# col is the cumulative sum of gammas
col = numpy.cumsum(gammas)[self.ns[0]-1:]
# each row of the array is a set of ps, normalized
# for each hypothetical value of n
array = row / col[:, numpy.newaxis]
# computing the multinomial PDF under a log transform
# take the log of the ps and multiply by the data
terms = numpy.log(array) * data
# add up the rows
log_likes = terms.sum(axis=1)
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
# correct for the number of ways we could see m species
# out of a possible n
coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
class Species4(Species):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
m = len(data)
# loop through the species and update one at a time
for i in range(m):
one = numpy.zeros(i+1)
one[i] = data[i]
# call the parent class
Species.Update(self, one)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
Note: this only works correctly if we update one species at a time.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of unseen species the new one
# could have been
m = len(data)
num_unseen = dirichlet.n - m + 1
like *= num_unseen
return like
class Species5(Species2):
"""Represents hypotheses about the number of species.
    Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies in increasing order
"""
# loop through the species and update one at a time
m = len(data)
for i in range(m):
self.UpdateOne(i+1, data[i])
self.params[i] += data[i]
def UpdateOne(self, i, count):
"""Updates the suite based on the data.
Evaluates the likelihood for all values of n.
i: which species was observed (1..n)
count: how many were observed
"""
# how many species have we seen so far
self.m = i
# how many reads have we seen
self.num_reads += count
if self.iters == 0:
return
# sample the likelihoods and add them up
likes = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
likes += self.SampleLikelihood(i, count)
# correct for the number of unseen species the new one
# could have been
unseen_species = [n-i+1 for n in self.ns]
likes *= unseen_species
# multiply the priors by the likelihoods and renormalize
self.probs *= likes
self.probs /= self.probs.sum()
def SampleLikelihood(self, i, count):
"""Computes the likelihood of the data under all hypotheses.
i: which species was observed
count: how many were observed
"""
# get a random sample of p
gammas = numpy.random.gamma(self.params)
# sums is the cumulative sum of p, for each value of n
sums = numpy.cumsum(gammas)[self.ns[0]-1:]
# get p for the mth species, for each value of n
ps = gammas[i-1] / sums
log_likes = numpy.log(ps) * count
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
return likes
def MakePosterior(constructor, data, ns, conc=1, iters=1000):
"""Makes a suite, updates it and returns the posterior suite.
Prints the elapsed time.
data: observed species and their counts
ns: sequence of hypothetical ns
conc: concentration parameter
iters: how many samples to draw
Returns: posterior suite of the given type
"""
suite = constructor(ns, conc=conc, iters=iters)
# print constructor.__name__
start = time.time()
suite.Update(data)
end = time.time()
print('Processing time', end-start)
return suite
def PlotAllVersions():
"""Makes a graph of posterior distributions of N."""
data = [1, 2, 3]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Save(root='species3',
xlabel='Number of species',
ylabel='Prob')
def PlotMedium():
"""Makes a graph of posterior distributions of N."""
data = [1, 1, 1, 1, 2, 3, 5, 9]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def SimpleDirichletExample():
"""Makes a plot showing posterior distributions for three species.
This is the case where we know there are exactly three species.
"""
thinkplot.Clf()
thinkplot.PrePlot(3)
names = ['lions', 'tigers', 'bears']
data = [3, 2, 1]
dirichlet = thinkbayes2.Dirichlet(3)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print('mean', names[i], beta.Mean())
dirichlet.Update(data)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print('mean', names[i], beta.Mean())
pmf = beta.MakePmf(label=names[i])
thinkplot.Pmf(pmf)
thinkplot.Save(root='species1',
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
)
def HierarchicalExample():
"""Shows the posterior distribution of n for lions, tigers and bears.
"""
ns = range(3, 30)
suite = Species(ns, iters=8000)
data = [3, 2, 1]
suite.Update(data)
thinkplot.Clf()
thinkplot.PrePlot(num=1)
pmf = suite.DistN()
thinkplot.Pdf(pmf)
thinkplot.Save(root='species2',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def CompareHierarchicalExample():
"""Makes a graph of posterior distributions of N."""
data = [3, 2, 1]
m = len(data)
n = 30
ns = range(m, n)
constructors = [Species, Species5]
iters = [1000, 100]
for constructor, iters in zip(constructors, iters):
suite = MakePosterior(constructor, data, ns, iters)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def ProcessSubjects(codes):
"""Process subjects with the given codes and plot their posteriors.
code: sequence of string codes
"""
thinkplot.Clf()
thinkplot.PrePlot(len(codes))
subjects = ReadRarefactedData()
pmfs = []
for code in codes:
subject = subjects[code]
subject.Process()
pmf = subject.suite.DistN()
pmf.label = subject.code
thinkplot.Pmf(pmf)
pmfs.append(pmf)
print('ProbGreater', thinkbayes2.PmfProbGreater(pmfs[0], pmfs[1]))
print('ProbLess', thinkbayes2.PmfProbLess(pmfs[0], pmfs[1]))
thinkplot.Save(root='species4',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def RunSubject(code, conc=1, high=500):
"""Run the analysis for the subject with the given code.
code: string code
"""
subjects = JoinSubjects()
subject = subjects[code]
subject.Process(conc=conc, high=high, iters=300)
subject.MakeQuickPrediction()
PrintSummary(subject)
actual_l = subject.total_species - subject.num_species
cdf_l = subject.DistL().MakeCdf()
PrintPrediction(cdf_l, actual_l)
subject.MakeFigures()
num_reads = 400
curves = subject.RunSimulations(100, num_reads)
root = 'species-rare-%s' % subject.code
PlotCurves(curves, root=root)
num_reads = 800
curves = subject.RunSimulations(500, num_reads)
ks = [100, 200, 400, 800]
cdfs = MakeConditionals(curves, ks)
root = 'species-cond-%s' % subject.code
PlotConditionals(cdfs, root=root)
num_reads = 1000
curves = subject.RunSimulations(500, num_reads, frac_flag=True)
ks = [10, 100, 200, 400, 600, 800, 1000]
cdfs = MakeFracCdfs(curves, ks)
root = 'species-frac-%s' % subject.code
PlotFracCdfs(cdfs, root=root)
def PrintSummary(subject):
"""Print a summary of a subject.
subject: Subject
"""
print(subject.code)
print('found %d species in %d reads' % (subject.num_species,
subject.num_reads))
print('total %d species in %d reads' % (subject.total_species,
subject.total_reads))
cdf = subject.suite.DistN().MakeCdf()
print('n')
PrintPrediction(cdf, 'unknown')
def PrintPrediction(cdf, actual):
"""Print a summary of a prediction.
cdf: predictive distribution
actual: actual value
"""
median = cdf.Percentile(50)
low, high = cdf.CredibleInterval(75)
print('predicted %0.2f (%0.2f %0.2f)' % (median, low, high))
print('actual', actual)
def RandomSeed(x):
"""Initialize random.random and numpy.random.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
def GenerateFakeSample(n, r, tr, conc=1):
"""Generates fake data with the given parameters.
n: number of species
r: number of reads in subsample
tr: total number of reads
conc: concentration parameter
Returns: hist of all reads, hist of subsample, prev_unseen
"""
# generate random prevalences
dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(tr)
# collect the species counts
hist = thinkbayes2.Hist(sample)
# extract a subset of the data
if tr > r:
random.shuffle(sample)
subsample = sample[:r]
subhist = thinkbayes2.Hist(subsample)
else:
subhist = hist
# add up the prevalence of unseen species
prev_unseen = 0
for species, prev in enumerate(prevalences):
if species not in subhist:
prev_unseen += prev
return hist, subhist, prev_unseen
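# Usage sketch (added for exposition, not part of the original script):
# draw a fake dataset of 1000 reads from 50 species, keeping a 200-read
# subsample; prev_unseen is the combined prevalence of unseen species.
def DemoGenerateFakeSample():
    hist, subhist, prev_unseen = GenerateFakeSample(n=50, r=200, tr=1000, conc=0.5)
    print('species seen in subsample', len(subhist))
    print('prevalence of unseen species', prev_unseen)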
def PlotActualPrevalences():
"""Makes a plot comparing actual prevalences with a model.
"""
# read data
subject_map, _ = ReadCompleteDataset()
# for subjects with more than 50 species,
    # PMF of the actual max prevalence, and PMF of the max prevalence
# generated by a simulation
pmf_actual = thinkbayes2.Pmf()
pmf_sim = thinkbayes2.Pmf()
# concentration parameter used in the simulation
conc = 0.06
for code, subject in subject_map.items():
prevalences = subject.GetPrevalences()
m = len(prevalences)
if m < 2:
continue
actual_max = max(prevalences)
print(code, m, actual_max)
# incr the PMFs
if m > 50:
pmf_actual.Incr(actual_max)
pmf_sim.Incr(SimulateMaxPrev(m, conc))
# plot CDFs for the actual and simulated max prevalence
cdf_actual = pmf_actual.MakeCdf(label='actual')
cdf_sim = pmf_sim.MakeCdf(label='sim')
thinkplot.Cdfs([cdf_actual, cdf_sim])
thinkplot.Show()
def ScatterPrevalences(ms, actual):
"""Make a scatter plot of actual prevalences and expected values.
    ms: sorted sequence of m (number of species)
actual: sequence of actual max prevalence
"""
for conc in [1, 0.5, 0.2, 0.1]:
expected = [ExpectedMaxPrev(m, conc) for m in ms]
thinkplot.Plot(ms, expected)
thinkplot.Scatter(ms, actual)
thinkplot.Show(xscale='log')
def SimulateMaxPrev(m, conc=1):
"""Returns random max prevalence from a Dirichlet distribution.
m: int number of species
conc: concentration parameter of the Dirichlet distribution
Returns: float max of m prevalences
"""
dirichlet = thinkbayes2.Dirichlet(m, conc)
prevalences = dirichlet.Random()
return max(prevalences)
def ExpectedMaxPrev(m, conc=1, iters=100):
"""Estimate expected max prevalence.
m: number of species
conc: concentration parameter
iters: how many iterations to run
Returns: expected max prevalence
"""
dirichlet = thinkbayes2.Dirichlet(m, conc)
t = []
for _ in range(iters):
prevalences = dirichlet.Random()
t.append(max(prevalences))
return numpy.mean(t)
class Calibrator(object):
"""Encapsulates the calibration process."""
def __init__(self, conc=0.1):
"""
"""
self.conc = conc
self.ps = range(10, 100, 10)
self.total_n = numpy.zeros(len(self.ps))
self.total_q = numpy.zeros(len(self.ps))
self.total_l = numpy.zeros(len(self.ps))
self.n_seq = []
self.q_seq = []
self.l_seq = []
def Calibrate(self, num_runs=100, n_low=30, n_high=400, r=400, tr=1200):
"""Runs calibrations.
num_runs: how many runs
"""
for seed in range(num_runs):
self.RunCalibration(seed, n_low, n_high, r, tr)
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def Validate(self, num_runs=100, clean_param=0):
"""Runs validations.
num_runs: how many runs
"""
subject_map, _ = ReadCompleteDataset(clean_param=clean_param)
i = 0
        for match in subject_map.values():
if match.num_reads < 400:
continue
num_reads = 100
print('Validate', match.code)
subject = match.Resample(num_reads)
subject.Match(match)
n_actual = None
q_actual = subject.prev_unseen
l_actual = subject.total_species - subject.num_species
self.RunSubject(subject, n_actual, q_actual, l_actual)
i += 1
if i == num_runs:
break
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def PlotN(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
xs, ys = zip(*self.n_seq)
if None in xs:
return
high = max(xs+ys)
thinkplot.Plot([0, high], [0, high], color='gray')
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual n',
ylabel='Predicted')
def PlotQ(self, root='species-q'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
thinkplot.Plot([0, 0.2], [0, 0.2], color='gray')
xs, ys = zip(*self.q_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual q',
ylabel='Predicted')
    def PlotL(self, root='species-l'):
"""Makes a scatter plot of simulated vs actual l.
"""
thinkplot.Plot([0, 20], [0, 20], color='gray')
xs, ys = zip(*self.l_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual l',
ylabel='Predicted')
def PlotCalibrationCurves(self, root='species5'):
"""Plots calibration curves"""
print(self.total_n)
print(self.total_q)
print(self.total_l)
thinkplot.Plot([0, 100], [0, 100], color='gray', alpha=0.2)
if self.total_n[0] >= 0:
thinkplot.Plot(self.ps, self.total_n, label='n')
thinkplot.Plot(self.ps, self.total_q, label='q')
thinkplot.Plot(self.ps, self.total_l, label='l')
thinkplot.Save(root=root,
axis=[0, 100, 0, 100],
xlabel='Ideal percentages',
ylabel='Predictive distributions',
formats=FORMATS,
)
def RunCalibration(self, seed, n_low, n_high, r, tr):
"""Runs a single calibration run.
Generates N and prevalences from a Dirichlet distribution,
then generates simulated data.
Runs analysis to get the posterior distributions.
Generates calibration curves for each posterior distribution.
seed: int random seed
"""
# generate a random number of species and their prevalences
# (from a Dirichlet distribution with alpha_i = conc for all i)
RandomSeed(seed)
n_actual = random.randrange(n_low, n_high+1)
hist, subhist, q_actual = GenerateFakeSample(
n_actual,
r,
tr,
self.conc)
l_actual = len(hist) - len(subhist)
print('Run low, high, conc', n_low, n_high, self.conc)
print('Run r, tr', r, tr)
print('Run n, q, l', n_actual, q_actual, l_actual)
# extract the data
data = [count for species, count in subhist.Items()]
data.sort()
print('data', data)
# make a Subject and process
subject = Subject('simulated')
subject.num_reads = r
subject.total_reads = tr
for species, count in subhist.Items():
subject.Add(species, count)
subject.Done()
self.RunSubject(subject, n_actual, q_actual, l_actual)
def RunSubject(self, subject, n_actual, q_actual, l_actual):
"""Runs the analysis for a subject.
subject: Subject
n_actual: number of species
q_actual: prevalence of unseen species
l_actual: number of new species
"""
# process and make prediction
subject.Process(conc=self.conc, iters=100)
subject.MakeQuickPrediction()
# extract the posterior suite
suite = subject.suite
# check the distribution of n
pmf_n = suite.DistN()
print('n')
self.total_n += self.CheckDistribution(pmf_n, n_actual, self.n_seq)
# check the distribution of q
pmf_q = suite.DistQ()
print('q')
self.total_q += self.CheckDistribution(pmf_q, q_actual, self.q_seq)
# check the distribution of additional species
pmf_l = subject.DistL()
print('l')
self.total_l += self.CheckDistribution(pmf_l, l_actual, self.l_seq)
def CheckDistribution(self, pmf, actual, seq):
"""Checks a predictive distribution and returns a score vector.
pmf: predictive distribution
actual: actual value
seq: which sequence to append (actual, mean) onto
"""
mean = pmf.Mean()
seq.append((actual, mean))
cdf = pmf.MakeCdf()
PrintPrediction(cdf, actual)
sv = ScoreVector(cdf, self.ps, actual)
return sv
def ScoreVector(cdf, ps, actual):
"""Checks whether the actual value falls in each credible interval.
cdf: predictive distribution
ps: percentages to check (0-100)
actual: actual value
Returns: numpy array of 0, 0.5, or 1
"""
scores = []
for p in ps:
low, high = cdf.CredibleInterval(p)
score = Score(low, high, actual)
scores.append(score)
return numpy.array(scores)
def Score(low, high, n):
"""Score whether the actual value falls in the range.
    Hitting an endpoint ("the posts") counts as 0.5; -1 means no actual value was available.
low: low end of range
high: high end of range
n: actual value
Returns: -1, 0, 0.5 or 1
"""
if n is None:
return -1
if low < n < high:
return 1
if n == low or n == high:
return 0.5
else:
return 0
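# Illustrative checks of the scoring convention above (added for exposition,
# not part of the original script):
def DemoScore():
    assert Score(10, 20, 15) == 1      # actual value inside the interval
    assert Score(10, 20, 10) == 0.5    # hitting an endpoint counts half
    assert Score(10, 20, 25) == 0      # outside the interval
    assert Score(10, 20, None) == -1   # no actual value available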
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
"""Makes a fake Subject.
If prevalences is provided, n and conc are ignored.
n: number of species
conc: concentration parameter
num_reads: number of reads
prevalences: numpy array of prevalences (overrides n and conc)
"""
# generate random prevalences
if prevalences is None:
dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(num_reads)
# collect the species counts
hist = thinkbayes2.Hist(sample)
# extract the data
data = [count for species, count in hist.Items()]
data.sort()
# make a Subject and process
subject = Subject('simulated')
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def PlotSubjectCdf(code=None, clean_param=0):
"""Checks whether the Dirichlet model can replicate the data.
"""
subject_map, uber_subject = ReadCompleteDataset(clean_param=clean_param)
if code is None:
subjects = subject_map.values()
subject = random.choice(subjects)
code = subject.code
elif code == 'uber':
subject = uber_subject
else:
subject = subject_map[code]
print(subject.code)
m = subject.GetM()
subject.Process(high=m, conc=0.1, iters=0)
print(subject.suite.params[:m])
# plot the cdf
options = dict(linewidth=3, color='blue', alpha=0.5)
cdf = subject.MakeCdf()
thinkplot.Cdf(cdf, **options)
options = dict(linewidth=1, color='green', alpha=0.5)
# generate fake subjects and plot their CDFs
for _ in range(10):
prevalences = subject.suite.SamplePrevalences(m)
fake = FakeSubject(prevalences=prevalences)
cdf = fake.MakeCdf()
thinkplot.Cdf(cdf, **options)
root = 'species-cdf-%s' % code
thinkplot.Save(root=root,
xlabel='rank',
ylabel='CDF',
xscale='log',
formats=FORMATS,
)
def RunCalibration(flag='cal', num_runs=100, clean_param=50):
"""Runs either the calibration or validation process.
flag: string 'cal' or 'val'
num_runs: how many runs
clean_param: parameter used for data cleaning
"""
cal = Calibrator(conc=0.1)
if flag == 'val':
cal.Validate(num_runs=num_runs, clean_param=clean_param)
else:
cal.Calibrate(num_runs=num_runs)
cal.PlotN(root='species-n-%s' % flag)
cal.PlotQ(root='species-q-%s' % flag)
cal.PlotL(root='species-l-%s' % flag)
cal.PlotCalibrationCurves(root='species5-%s' % flag)
def RunTests():
"""Runs calibration code and generates some figures."""
RunCalibration(flag='val')
RunCalibration(flag='cal')
PlotSubjectCdf('B1558.G', clean_param=50)
PlotSubjectCdf(None)
def main(script):
RandomSeed(17)
RunSubject('B1242', conc=1, high=100)
RandomSeed(17)
SimpleDirichletExample()
RandomSeed(17)
HierarchicalExample()
if __name__ == '__main__':
main(*sys.argv)
| mit |
dhruvagarwal/django | tests/generic_views/test_list.py | 306 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
def test_explicitly_ordered_list_view(self):
Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/list/books/sorted/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, '2066')
self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
res = self.client.get('/list/books/sortedbypagesandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
self.assertEqual(res.context['object_list'][2].name, '2066')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
cainiaocome/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 217 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
giorgiop/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 373 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
istio/envoy | tools/api_proto_plugin/annotations.py | 5 | 3708 | """Envoy API annotations."""
from collections import namedtuple
import re
# Key-value annotation regex.
ANNOTATION_REGEX = re.compile(r'\[#([\w-]+?):\s*(.*?)\](\s?)', re.DOTALL)  # raw string avoids invalid-escape warnings
# Page/section titles with special prefixes in the proto comments
DOC_TITLE_ANNOTATION = 'protodoc-title'
# When documenting an extension, this should be used to specify the qualified
# name that the extension registers as in the static registry, e.g.
# envoy.filters.network.http_connection_manager.
EXTENSION_ANNOTATION = 'extension'
# Annotation on leading comments marking a field as not implemented yet,
# which causes the field to be hidden.
NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide'
# For large protos, place a comment at the top that specifies the next free field number.
NEXT_FREE_FIELD_ANNOTATION = 'next-free-field'
# Comment that allows for easy searching for things that need cleaning up in the next major
# API version.
NEXT_MAJOR_VERSION_ANNOTATION = 'next-major-version'
# Comment. Just used for adding text that will not go into the docs at all.
COMMENT_ANNOTATION = 'comment'
VALID_ANNOTATIONS = set([
DOC_TITLE_ANNOTATION,
EXTENSION_ANNOTATION,
NOT_IMPLEMENTED_HIDE_ANNOTATION,
NEXT_FREE_FIELD_ANNOTATION,
NEXT_MAJOR_VERSION_ANNOTATION,
COMMENT_ANNOTATION,
])
# These can propagate from file scope to message/enum scope (and be overridden).
INHERITED_ANNOTATIONS = set([
# Nothing here right now, this used to be PROTO_STATUS_ANNOTATION. Retaining
# this capability for potential future use.
])
class AnnotationError(Exception):
"""Base error class for the annotations module."""
def ExtractAnnotations(s, inherited_annotations=None):
"""Extract annotations map from a given comment string.
Args:
    s: string that may contain annotations.
inherited_annotations: annotation map from file-level inherited annotations
(or None) if this is a file-level comment.
Returns:
Annotation map.
"""
annotations = {
k: v for k, v in (inherited_annotations or {}).items() if k in INHERITED_ANNOTATIONS
}
# Extract annotations.
groups = re.findall(ANNOTATION_REGEX, s)
for group in groups:
annotation = group[0]
if annotation not in VALID_ANNOTATIONS:
raise AnnotationError('Unknown annotation: %s' % annotation)
annotations[group[0]] = group[1].lstrip()
return annotations
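# Behaviour sketch (added for exposition, not part of the original module):
# known annotations are pulled out of a comment string into a dict keyed by
# annotation name.
def _DemoExtractAnnotations():
  s = 'Foo bar. [#next-free-field: 5] [#comment: ignore me]'
  assert ExtractAnnotations(s) == {'next-free-field': '5', 'comment': 'ignore me'}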
def XformAnnotation(s, annotation_xforms):
"""Return transformed string with annotation transformers.
The annotation will be replaced with the new value returned by the transformer.
If the transformer returns None, then the annotation will be removed.
  If an annotation named in annotation_xforms doesn't exist in the original string,
  a new annotation will be appended to the end of the string.
Args:
annotation_xforms: a dict of transformers for annotations.
Returns:
transformed string.
"""
present_annotations = set()
def xform(match):
annotation, content, trailing = match.groups()
present_annotations.add(annotation)
annotation_xform = annotation_xforms.get(annotation)
if annotation_xform:
value = annotation_xform(annotation)
return '[#%s: %s]%s' % (annotation, value, trailing) if value is not None else ''
else:
return match.group(0)
def append(s, annotation, content):
return '%s [#%s: %s]\n' % (s, annotation, content)
xformed = re.sub(ANNOTATION_REGEX, xform, s)
for annotation, xform in sorted(annotation_xforms.items()):
if annotation not in present_annotations:
value = xform(None)
if value is not None:
xformed = append(xformed, annotation, value)
return xformed
def WithoutAnnotations(s):
return re.sub(ANNOTATION_REGEX, '', s)
| apache-2.0 |
PatrickOReilly/scikit-learn | examples/decomposition/plot_kernel_pca.py | 350 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
glennq/scikit-learn | sklearn/externals/joblib/parallel.py | 33 | 32728 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
from contextlib import contextmanager
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend)
from ._compat import _basestring
from .func_inspect import getfullargspec
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
BACKENDS = {
'multiprocessing': MultiprocessingBackend,
'threading': ThreadingBackend,
'sequential': SequentialBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'multiprocessing'
DEFAULT_N_JOBS = 1
# Thread-local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()
def get_active_backend():
"""Return the active default backend"""
active_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if active_backend_and_jobs is not None:
return active_backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now
active_backend = BACKENDS[DEFAULT_BACKEND]()
return active_backend, DEFAULT_N_JOBS
@contextmanager
def parallel_backend(backend, n_jobs=-1, **backend_params):
"""Change the default backend used by Parallel inside a with block.
If ``backend`` is a string it must match a previously registered
implementation using the ``register_parallel_backend`` function.
Alternatively backend can be passed directly as an instance.
By default all available workers will be used (``n_jobs=-1``) unless the
caller passes an explicit value for the ``n_jobs`` parameter.
This is an alternative to passing a ``backend='backend_name'`` argument to
the ``Parallel`` class constructor. It is particularly useful when calling
into library code that uses joblib internally but does not expose the
backend argument in its own API.
>>> from operator import neg
>>> with parallel_backend('threading'):
... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
...
[-1, -2, -3, -4, -5]
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
if isinstance(backend, _basestring):
backend = BACKENDS[backend](**backend_params)
old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
try:
_backend.backend_and_jobs = (backend, n_jobs)
# return the backend instance to make it easier to write tests
yield backend, n_jobs
finally:
if old_backend_and_jobs is None:
if getattr(_backend, 'backend_and_jobs', None) is not None:
del _backend.backend_and_jobs
else:
_backend.backend_and_jobs = old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
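# Minimal illustration (added for exposition, not part of joblib itself):
# a BatchedCalls object bundles several (func, args, kwargs) triples into a
# single picklable callable.
def _demo_batched_calls():
    batch = BatchedCalls([(abs, (-1,), {}), (max, (2, 3), {})])
    assert len(batch) == 2
    assert batch() == [1, 3]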
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
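# Behaviour sketch (derived from the formula above, added for exposition):
# with verbose=5, messages are printed at indices 0, 17, 71, 161, ..., i.e.
# the spacing between progress reports grows quadratically over time.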
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
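# Quick illustration (added for exposition, not part of joblib itself):
# delayed() records a call instead of executing it, yielding a
# (function, args, kwargs) triple that a backend can run later.
def _demo_delayed():
    f, args, kwargs = delayed(sqrt)(16)  # sqrt is imported at module top
    assert (f, args, kwargs) == (sqrt, (16,), {})
    assert f(*args, **kwargs) == 4.0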
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size,
this_batch_duration)
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the Parallel class. Moreover, the default backend can be
overwritten globally by setting make_default=True.
    The factory can be any callable that takes no argument and returns an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
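# Hypothetical usage sketch (backend name and factory invented for
# illustration; a real factory must return a ParallelBackendBase instance):
#   register_parallel_backend('my_backend', MyBackendFactory)
#   with parallel_backend('my_backend'):
#       Parallel()(delayed(sqrt)(i) for i in range(10))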
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
    n_jobs is the number of workers requested by the caller.
    Passing n_jobs=-1 means requesting all available workers, for instance
    matching the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, _ = get_active_backend()
return backend.effective_n_jobs(n_jobs=n_jobs)
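# For instance, with the default multiprocessing backend, effective_n_jobs(-1)
# is expected to match cpu_count() (an illustration, not a guarantee for
# custom backends).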
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
register_parallel_backend. This will allow you to implement
a backend of your liking.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
            If it is more than 10, all iterations are reported.
timeout: float, optional
            Timeout limit for each task to complete. If any task takes longer,
            a TimeOutError will be raised. Only applied when n_jobs != 1.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
            Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
        max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
    using the raw multiprocessing API is (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, timeout=None,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
active_backend, default_n_jobs = get_active_backend()
if backend is None and n_jobs == 1:
# If we are under a parallel_backend context manager, look up
# the default number of jobs and use that instead:
n_jobs = default_n_jobs
self.n_jobs = n_jobs
self.verbose = verbose
self.timeout = timeout
self.pre_dispatch = pre_dispatch
if isinstance(max_nbytes, _basestring):
max_nbytes = memstr_to_bytes(max_nbytes)
self._backend_args = dict(
max_nbytes=max_nbytes,
mmap_mode=mmap_mode,
temp_folder=temp_folder,
verbose=max(0, self.verbose - 50),
)
if DEFAULT_MP_CONTEXT is not None:
self._backend_args['context'] = DEFAULT_MP_CONTEXT
if backend is None:
backend = active_backend
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_args['context'] = backend
backend = MultiprocessingBackend()
else:
try:
backend_factory = BACKENDS[backend]
except KeyError:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys())))
backend = backend_factory()
if (batch_size == 'auto' or isinstance(batch_size, Integral) and
batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self._backend = backend
self._output = None
self._jobs = list()
self._managed_backend = False
# This lock is used coordinate the main thread of this process with
# the async callback thread of our the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_backend = True
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_backend()
self._managed_backend = False
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
return self._backend.configure(n_jobs=self.n_jobs, parallel=self,
**self._backend_args)
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
return self._initialize_backend()
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_backend(self):
if self._backend is not None:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._backend.apply_async(batch, callback=cb)
self._jobs.append(job)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
        Iterator consumption and dispatching are protected by the same
        lock, so calling this function should be thread-safe.
"""
if self.batch_size == 'auto':
batch_size = self._backend.compute_batch_size()
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # Original job iterator becomes None once it has been fully
        # consumed: at this point we know the total number of jobs and we
        # are able to display an estimation of the remaining time based on
        # already completed jobs. Otherwise, we simply display the number
        # of completed tasks.
if self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time), ))
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1 -
self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / index) * \
(self.n_dispatched_tasks - index * 1.0)
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
            # We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by
            # default, hence the use of the lock.
with self._lock:
job = self._jobs.pop(0)
try:
# check if timeout supported in backend future implementation
if 'timeout' in getfullargspec(job.get).args:
self._output.extend(job.get(timeout=self.timeout))
else:
self._output.extend(job.get())
except BaseException as exception:
# Note: we catch any BaseException instead of just Exception
# instances to also include KeyboardInterrupt.
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# If the backends allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if (backend is not None and
hasattr(backend, 'abort_everything')):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while
                # loop. No need to wait for async callbacks to trigger
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_backend:
self._terminate_backend()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
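

if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes this module's ``delayed``
    # helper): using Parallel as a context manager keeps the worker pool
    # alive across consecutive calls, which is what the __enter__/__exit__
    # managed-backend machinery above enables.
    from math import sqrt
    with Parallel(n_jobs=2) as parallel:
        roots = parallel(delayed(sqrt)(i ** 2) for i in range(4))
        squares = parallel(delayed(pow)(i, 2) for i in range(4))
    print(roots, squares)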
| bsd-3-clause |
giorgiop/scikit-learn | examples/svm/plot_svm_kernels.py | 326 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
glennq/scikit-learn | sklearn/tests/test_kernel_ridge.py | 339 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
pianomania/scikit-learn | sklearn/naive_bayes.py | 20 | 30830 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like
        of shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
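
    # Illustrative property (assuming a fitted estimator ``clf``): because
    # the joint log likelihood is normalized by log P(x) above, the rows of
    # the exponentiated output are proper probability distributions:
    #
    #     >>> proba = np.exp(clf.predict_log_proba(X))   # doctest: +SKIP
    #     >>> np.allclose(proba.sum(axis=1), 1.0)        # doctest: +SKIP
    #     True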
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None):
self.priors = priors
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running statistics.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
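
    # Sanity check of the update rule above (an illustrative sketch, not
    # part of the class): streaming a sample in two chunks reproduces the
    # batch statistics.
    #
    #     >>> rng = np.random.RandomState(0)                # doctest: +SKIP
    #     >>> A, B = rng.randn(10, 3), rng.randn(5, 3)      # doctest: +SKIP
    #     >>> mu, var = GaussianNB._update_mean_variance(   # doctest: +SKIP
    #     ...     10, A.mean(axis=0), A.var(axis=0), B)
    #     >>> full = np.vstack([A, B])                      # doctest: +SKIP
    #     >>> np.allclose(mu, full.mean(axis=0))            # doctest: +SKIP
    #     True
    #     >>> np.allclose(var, full.var(axis=0))            # doctest: +SKIP
    #     True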
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
                # Check that the priors sum to 1 (up to floating point error)
                if not np.isclose(priors.sum(), 1.0):
                    raise ValueError('The sum of the priors should be 1.')
# Check that the prior are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Remove the epsilon added at the end of the previous call
            # before updating the variances; it is added back below.
            self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
        # Update the priors only when none were provided by the user
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
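
# In other words (illustrative): GaussianNB._joint_log_likelihood computes,
# for each class i,
#     log P(y=i) - 0.5 * sum(log(2 * pi * sigma_i))
#                - 0.5 * sum(((x - theta_i) ** 2) / sigma_i)
# i.e. the log of the Gaussian density of x times the class prior.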
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes], optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
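
    # Worked example (illustrative): for a class with raw feature counts
    # [3, 1, 0] and alpha=1, the smoothed counts are [4, 2, 1] and the
    # smoothed class total is 7, so the corresponding row of
    # feature_log_prob_ is log([4/7, 2/7, 1/7]).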
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
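

if __name__ == '__main__':
    # Illustrative sanity check (a sketch, not part of the library): the
    # algebraic shortcut used in BernoulliNB._joint_log_likelihood above.
    # With P = feature_log_prob_ and Q = log(1 - exp(P)),
    #     X @ P.T + (1 - X) @ Q.T == X @ (P - Q).T + Q.sum(axis=1)
    # which avoids materializing the dense (1 - X) matrix.
    rng = np.random.RandomState(0)
    P = np.log(rng.uniform(0.1, 0.9, size=(3, 5)))  # log P(x_i = 1 | c)
    Q = np.log(1 - np.exp(P))                       # log P(x_i = 0 | c)
    Xb = rng.randint(2, size=(4, 5)).astype(np.float64)
    direct = Xb.dot(P.T) + (1 - Xb).dot(Q.T)
    shortcut = Xb.dot((P - Q).T) + Q.sum(axis=1)
    assert np.allclose(direct, shortcut)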
| bsd-3-clause |
glennq/scikit-learn | sklearn/utils/deprecation.py | 75 | 2417 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra,
    use an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
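

if __name__ == '__main__':
    # Minimal usage sketch (illustrative): decorating a class wraps its
    # __init__ so that instantiating it emits a DeprecationWarning.
    @deprecated("use NewThing instead")
    class OldThing(object):
        def __init__(self):
            self.x = 1

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldThing()
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)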
| bsd-3-clause |
pianomania/scikit-learn | examples/decomposition/plot_image_denoising.py | 69 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first fitting an online :ref:`DictionaryLearning`
model and then using it with various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). In addition, it is closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
glennq/scikit-learn | sklearn/cluster/bicluster.py | 64 | 19850 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
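
# Worked example for _scale_normalize (illustrative, dense input): the
# normalized matrix is An = D_r^{-1/2} * X * D_c^{-1/2}, with D_r and D_c
# the diagonal matrices of row and column sums:
#
#     >>> X = np.array([[1., 3.], [2., 2.]])                 # doctest: +SKIP
#     >>> an, _, _ = _scale_normalize(X)                     # doctest: +SKIP
#     >>> expected = X / np.sqrt(np.outer(X.sum(axis=1),     # doctest: +SKIP
#     ...                                 X.sum(axis=0)))
#     >>> np.allclose(an, expected)                          # doctest: +SKIP
#     True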
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
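
# Note (illustrative): _log_normalize double-centers the log matrix, so
# both the row means and the column means of its output are (numerically)
# zero:
#
#     >>> K = _log_normalize(np.arange(1., 7.).reshape(2, 3))  # doctest: +SKIP
#     >>> np.allclose(K.mean(axis=0), 0)                       # doctest: +SKIP
#     True
#     >>> np.allclose(K.mean(axis=1), 0)                       # doctest: +SKIP
#     True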
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method='arpack'` and `n_oversamples` when
        `svd_method='randomized'`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method='arpack'` and `n_oversamples` when
        `svd_method='randomized'`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
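# A minimal usage sketch (hypothetical data; assumes the ``make_checkerboard``
# helper from sklearn.datasets is available, as in mainline scikit-learn):
#
#     from sklearn.datasets import make_checkerboard
#     data, rows, columns = make_checkerboard(
#         shape=(300, 300), n_clusters=(4, 3), noise=10, random_state=0)
#     model = SpectralBiclustering(n_clusters=(4, 3), method='log',
#                                  random_state=0)
#     model.fit(data)            # dense input, so method='log' is allowed
#     model.row_labels_          # one of 4 row-cluster labels per row
#     model.column_labels_       # one of 3 column-cluster labels per column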
| bsd-3-clause |
glennq/scikit-learn | sklearn/linear_model/least_angle.py | 5 | 57466 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (nor will they when using method 'lar').
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
                    # In the first iteration, all alphas are zero; the formula
                    # below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is
                # Fortran-contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
            for ii in idx:
                arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
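# A minimal sketch of calling ``lars_path`` directly (hypothetical toy data):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     y = X[:, 0] - 2. * X[:, 3] + 0.01 * rng.randn(50)
#     alphas, active, coefs = lars_path(X, y, method='lasso')
#     # ``coefs`` has shape (n_features, n_alphas + 1); each column holds the
#     # coefficient vector at the matching entry of ``alphas``.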
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
@property
@deprecated("Attribute mse_path_ is deprecated in 0.18 and "
"will be removed in 0.20. Use 'cv_mse_path_' instead")
def cv_mse_path_(self):
return self.mse_path_
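# A minimal sketch of cross-validated model selection with LarsCV
# (hypothetical toy data):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 8)
#     y = X[:, 1] + 0.5 * X[:, 4] + 0.1 * rng.randn(100)
#     model = LarsCV(cv=5).fit(X, y)
#     model.alpha_     # regularization strength selected on left-out error
#     model.mse_path_  # mean squared error per fold along the alpha path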
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
privatecaptain/qden | flask_script.py | 1 | 7845 | from flask import Flask, request, make_response, redirect, render_template
import random
from flask import Flask, url_for,render_template
import xlrd
import sklearn.ensemble
from sklearn.ensemble import RandomForestRegressor
import sqlite3
import requests
db_connector = sqlite3.connect("/var/www/qden/qden_data.db",check_same_thread=False)
#
"""
http://localhost:5000/84.2/56.8/7329.4
"""
app = Flask(__name__)
path = "/var/www/qden/Q_Den.xlsx"
book = xlrd.open_workbook(path)
# print book.nsheets
first_sheet = book.sheet_by_index(0)
num_rows = first_sheet.nrows - 1
curr_row = 0
x=[]
y=[]
y_echo=[]
y_total_cooling_hours=[]
y_DX=[]
y_EERH=[]
y_consumption_saving=[]
y_peak_demand_saving=[]
y_water_consumption=[]
while curr_row < num_rows:
curr_row += 1
# row = first_sheet.row(curr_row)
dry_bulb = first_sheet.cell(curr_row,1).value
wet_bulb = first_sheet.cell(curr_row,3).value
elevation = first_sheet.cell(curr_row,4).value
echo = first_sheet.cell(curr_row,5).value
total_cooling_hours = first_sheet.cell(curr_row,6).value
DX = first_sheet.cell(curr_row,7).value
EERH = first_sheet.cell(curr_row,8).value
consumption_saving = first_sheet.cell(curr_row,9).value
peak_demand_saving = first_sheet.cell(curr_row,10).value
water_consumption = first_sheet.cell(curr_row,11).value
xval = [dry_bulb,wet_bulb,elevation]
y.append(echo)
y_echo.append(echo)
y_total_cooling_hours.append(total_cooling_hours)
y_DX.append(DX)
y_EERH.append(EERH)
y_consumption_saving.append(consumption_saving)
y_peak_demand_saving.append(peak_demand_saving)
y_water_consumption.append(water_consumption)
x.append(xval)
# Hyperparameter tuning notes (these and the similar comments below appear to
# record n_estimators / max_depth / cross-validation error triples):
# 4410 5 0.0103430437039, 960 7 0.010346165879
clf_echo = RandomForestRegressor(n_estimators=4410, max_depth=5)
clf_echo.fit(x, y_echo)
#960 19 977462.760895,
clf_total_cooling_hours = RandomForestRegressor(n_estimators=960, max_depth=19)
clf_total_cooling_hours.fit(x, y_total_cooling_hours)
clf_DX = RandomForestRegressor(n_estimators=1610, max_depth=8)
clf_DX.fit(x, y_DX)
#4810 4 7.19106692097 4810 4 7.19106692097 2760 18 7.3044908132
clf_EERH = RandomForestRegressor(n_estimators=4810, max_depth=4)
clf_EERH.fit(x, y_EERH)
#1910 18 0.00471920801752, 4610 7 0.00476592327654
clf_consumption_saving = RandomForestRegressor(n_estimators=1910, max_depth=18)
clf_consumption_saving.fit(x, y_consumption_saving)
#done 10 7 0.00819795771602, 510 14 0.00930511952647, 610 11 0.00936973055891,
clf_peak_demand_saving = RandomForestRegressor(n_estimators=510, max_depth=14)
clf_peak_demand_saving.fit(x, y_peak_demand_saving)
#2610 11 0.0129692552313
clf_water_consumption = RandomForestRegressor(n_estimators=2610, max_depth=11)
clf_water_consumption.fit(x, y_water_consumption)
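# A quick sanity check of the fitted regressors (hypothetical call, reusing
# the sample inputs from the example URL above: dry bulb 84.2, wet bulb 56.8,
# elevation 7329.4):
#
#     sample = [[84.2, 56.8, 7329.4]]
#     print(clf_echo.predict(sample)[0])
#     print(clf_consumption_saving.predict(sample)[0])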
# num_rows = 5
#@app.route('/')
#def api_root():
# return render_template('login.html')
#routing the qden page to the User Interface
@app.route('/articles')
def api_articles():
return 'List of ' + url_for('api_articles')
# @app.route('/articles/<articleid>')
# def api_article(articleid):
# return 'You are reading ' + articleid
@app.route('/articles/<articleid1>/<articleid2>/<articleid3>')
def api_article(articleid1, articleid2, articleid3):
    # accept all three URL segments; otherwise Flask raises a TypeError
    # when dispatching this route
    return "hii"
    # return 'You are reading ' + articleid1
@app.route('/<dry_bulb>/<wet_bulb>/<elevation>')
def user(dry_bulb,wet_bulb,elevation):
# num_rows = 5
# path = "/Users/prasanna/mrjob/Q_Den.xlsx"
# book = xlrd.open_workbook(path)
# # print book.nsheets
# first_sheet = book.sheet_by_index(0)
# num_rows = first_sheet.nrows - 1
    # URL segments arrive as strings; cast to float and wrap in a 2-D list,
    # since scikit-learn predictors expect shape (n_samples, n_features)
    X_test = [[float(dry_bulb), float(wet_bulb), float(elevation)]]
preds_echo = clf_echo.predict(X_test)
preds_total_cooling_hours = clf_total_cooling_hours.predict(X_test)
preds_DX = clf_DX.predict(X_test)
preds_EERH = clf_EERH.predict(X_test)
preds_consumption_saving = clf_consumption_saving.predict(X_test)
preds_peak_demand_saving = clf_peak_demand_saving.predict(X_test)
preds_water_consumption = clf_water_consumption.predict(X_test)
obj = {"echo":preds_echo[0],"total cooling hours":preds_total_cooling_hours[0],"DX":preds_DX[0],"EERH":preds_EERH[0],
"consumption saving":preds_consumption_saving[0],"demand saving":preds_peak_demand_saving[0],
"water consumption":preds_water_consumption[0]
}
return str(obj) + "," + str(dry_bulb) + "," + str(wet_bulb) + "," + str(elevation) + "," + str(preds_echo[0]) + "," + str(preds_total_cooling_hours[0]) + "," + str(preds_DX[0]) + "," + str(preds_EERH[0]) + "," + str(preds_consumption_saving[0]) + "," + str(preds_peak_demand_saving[0]) + "," + str(preds_water_consumption[0])
# return "usercalled"+str(dry_bulb)+str(wet_bulb)+str(elevation)
#
def sendmail(to,code):
return requests.post(
"https://api.mailgun.net/v3/info.air2o.com/messages",
auth=("api", "key-49df66c0deb773d5ee957597be27f9e9"),
data={"from": "Q-DEN Air2O <verification@info.air2o.com>",
"to": [to,],
"subject": "Verification Email",
"text": "Congratulations on creating your Q-DEN account. Please enter the code below to activate your account.\n\n\n" + code})
def adduser(name,email,code):
try:
with db_connector:
cursor = db_connector.cursor()
t = (name,email,code,0)
cursor.execute("INSERT INTO USERS(name,email,password,active) VALUES(?,?,?,?);",t)
return True
except:
return False
def activate(email):
try:
with db_connector:
cursor = db_connector.cursor()
t = (1,email)
cursor.execute("UPDATE USERS SET active=? WHERE email=?;",t)
return True
except:
return False
def verify(email,code):
try:
with db_connector:
cursor = db_connector.cursor()
t = (email,)
cursor.execute("SELECT password FROM USERS WHERE email=?;",t)
pwd = cursor.fetchone()[0]
if code == pwd:
return True
except:
pass
return False
def valid_login(email):
try:
with db_connector:
cursor = db_connector.cursor()
t = (email,)
cursor.execute("SELECT * FROM USERS WHERE email=?;",t)
            row = cursor.fetchone()  # None when the email is unknown
            if row:
                return True
except:
pass
return False
def log_the_user_in(email,flag=False):
try:
with db_connector:
cursor = db_connector.cursor()
t = (email,)
cursor.execute("SELECT active FROM USERS WHERE email=?;",t)
act = cursor.fetchone()[0]
if int(act) == 1:
resp = make_response(render_template('index.html'))
if flag:
resp.set_cookie('email',email)
return resp
except:
pass
return render_template("success.html",email=email)
def generate_code():
return random.randint(1000,10000)
@app.route('/')
def index():
email = request.cookies.get('email')
if email:
return log_the_user_in(email)
return render_template('login.html')
@app.route('/login', methods=['POST',])
def login():
if request.method == 'POST':
email = request.form['email']
try:
flag = request.form['remember']
except:
flag = None
        print(flag)
if valid_login(email):
if flag == 'on':
return log_the_user_in(email,flag=True)
else:
return log_the_user_in(email)
else:
return render_template('loginerror.html')
return render_template('login.html')
@app.route('/signup',methods=['POST'])
def signup():
email = request.form['email']
if request.method == 'POST':
name = request.form['name']
code = str(generate_code())
if adduser(name,email,code):
sendmail(email,code)
return render_template('success.html',email=email)
return render_template('login.html')
@app.route('/activate',methods=['POST',])
def activate_user():
email = request.form['email']
code = request.form['code']
if verify(email,code):
if activate(email):
return render_template('index.html')
return render_template('login.html')
if __name__ == '__main__':
app.run()
| apache-2.0 |
ray-project/ray | rllib/policy/view_requirement.py | 1 | 6277 | import dataclasses
import gym
from typing import Dict, List, Optional, Union
import numpy as np
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.serialization import (
gym_space_to_dict,
gym_space_from_dict,
)
torch, _ = try_import_torch()
@PublicAPI
@dataclasses.dataclass
class ViewRequirement:
"""Single view requirement (for one column in an SampleBatch/input_dict).
Policies and ModelV2s return a Dict[str, ViewRequirement] upon calling
their `[train|inference]_view_requirements()` methods, where the str key
represents the column name (C) under which the view is available in the
input_dict/SampleBatch and ViewRequirement specifies the actual underlying
column names (in the original data buffer), timestep shifts, and other
options to build the view.
Examples:
>>> from ray.rllib.models.modelv2 import ModelV2
>>> # The default ViewRequirement for a Model is:
>>> req = ModelV2(...).view_requirements # doctest: +SKIP
>>> print(req) # doctest: +SKIP
{"obs": ViewRequirement(shift=0)}
Args:
data_col: The data column name from the SampleBatch
(str key). If None, use the dict key under which this
ViewRequirement resides.
space: The gym Space used in case we need to pad data
in inaccessible areas of the trajectory (t<0 or t>H).
Default: Simple box space, e.g. rewards.
shift: Single shift value or
list of relative positions to use (relative to the underlying
`data_col`).
Example: For a view column "prev_actions", you can set
`data_col="actions"` and `shift=-1`.
Example: For a view column "obs" in an Atari framestacking
fashion, you can set `data_col="obs"` and
`shift=[-3, -2, -1, 0]`.
Example: For the obs input to an attention net, you can specify
a range via a str: `shift="-100:0"`, which will pass in
the past 100 observations plus the current one.
index: An optional absolute position arg,
used e.g. for the location of a requested inference dict within
the trajectory. Negative values refer to counting from the end
of a trajectory. (#TODO: Is this still used?)
batch_repeat_value: determines how many time steps we should skip
before we repeat the view indexing for the next timestep. For RNNs this
number is usually the sequence length that we will rollout over.
Example:
view_col = "state_in_0", data_col = "state_out_0"
batch_repeat_value = 5, shift = -1
buffer["state_out_0"] = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
output["state_in_0"] = [-1, 4, 9]
            Explanation: For t=0, we output buffer["state_out_0"][-1]. We then skip 5
            time steps and repeat the view. For t=5, we output buffer["state_out_0"][4].
            Continuing on this pattern, for t=10, we output buffer["state_out_0"][9].
used_for_compute_actions: Whether the data will be used for
creating input_dicts for `Policy.compute_actions()` calls (or
`Policy.compute_actions_from_input_dict()`).
used_for_training: Whether the data will be used for
training. If False, the column will not be copied into the
final train batch.
"""
data_col: Optional[str] = None
space: gym.Space = None
shift: Union[int, str, List[int]] = 0
index: Optional[int] = None
batch_repeat_value: int = 1
used_for_compute_actions: bool = True
used_for_training: bool = True
shift_arr: Optional[np.ndarray] = dataclasses.field(init=False)
def __post_init__(self):
"""Initializes a ViewRequirement object.
        shift_arr is inferred from the shift value.
        For example:
        - if shift is -1, then shift_arr is np.array([-1]).
        - if shift is [-1, -2], then shift_arr is np.array([-1, -2]).
        - if shift is "-2:2", then shift_arr is np.array([-2, -1, 0, 1, 2]).
"""
if self.space is None:
self.space = gym.spaces.Box(float("-inf"), float("inf"), shape=())
# TODO: ideally we won't need shift_from and shift_to, and shift_step.
# all of them should be captured within shift_arr.
# Special case: Providing a (probably larger) range of indices, e.g.
# "-100:0" (past 100 timesteps plus current one).
self.shift_from = self.shift_to = self.shift_step = None
if isinstance(self.shift, str):
split = self.shift.split(":")
assert len(split) in [2, 3], f"Invalid shift str format: {self.shift}"
if len(split) == 2:
f, t = split
self.shift_step = 1
else:
f, t, s = split
self.shift_step = int(s)
self.shift_from = int(f)
self.shift_to = int(t)
shift = self.shift
        self.shift_arr = None
        if self.shift_from is not None:
self.shift_arr = np.arange(
self.shift_from, self.shift_to + 1, self.shift_step
)
else:
if isinstance(shift, int):
self.shift_arr = np.array([shift])
elif isinstance(shift, list):
self.shift_arr = np.array(shift)
else:
                raise ValueError(f'unrecognized shift type: "{shift}"')
def to_dict(self) -> Dict:
"""Return a dict for this ViewRequirement that can be JSON serialized."""
return {
"data_col": self.data_col,
"space": gym_space_to_dict(self.space),
"shift": self.shift,
"index": self.index,
"batch_repeat_value": self.batch_repeat_value,
"used_for_training": self.used_for_training,
"used_for_compute_actions": self.used_for_compute_actions,
}
@classmethod
def from_dict(cls, d: Dict):
"""Construct a ViewRequirement instance from JSON deserialized dict."""
d["space"] = gym_space_from_dict(d["space"])
return cls(**d)
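# Usage sketch (illustrative, not part of the original file): a
# "prev_actions" view reading the "actions" column shifted back one step.
#
#   vr = ViewRequirement(data_col="actions", shift=-1)
#   # vr.shift_arr == np.array([-1])
#
#   framestack = ViewRequirement(data_col="obs", shift=[-3, -2, -1, 0])
#   # framestack.shift_arr == np.array([-3, -2, -1, 0])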
| apache-2.0 |
ray-project/ray | rllib/examples/models/shared_weights_model.py | 1 | 6957 | import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
TF2_GLOBAL_SHARED_LAYER = None
class TF2SharedWeightsModel(TFModelV2):
"""Example of weight sharing between two different TFModelV2s.
NOTE: This will only work for tf2.x. When running with config.framework=tf,
use SharedWeightsModel1 and SharedWeightsModel2 below, instead!
The shared (single) layer is simply defined outside of the two Models,
then used by both Models in their forward pass.
"""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
global TF2_GLOBAL_SHARED_LAYER
# The global, shared layer to be used by both models.
if TF2_GLOBAL_SHARED_LAYER is None:
TF2_GLOBAL_SHARED_LAYER = tf.keras.layers.Dense(
units=64, activation=tf.nn.relu, name="fc1"
)
inputs = tf.keras.layers.Input(observation_space.shape)
last_layer = TF2_GLOBAL_SHARED_LAYER(inputs)
output = tf.keras.layers.Dense(
units=num_outputs, activation=None, name="fc_out"
)(last_layer)
vf = tf.keras.layers.Dense(units=1, activation=None, name="value_out")(
last_layer
)
self.base_model = tf.keras.models.Model(inputs, [output, vf])
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(input_dict["obs"])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class SharedWeightsModel1(TFModelV2):
"""Example of weight sharing between two different TFModelV2s.
NOTE: This will only work for tf1 (static graph). When running with
config.framework_str=tf2, use TF2SharedWeightsModel, instead!
Here, we share the variables defined in the 'shared' variable scope
by entering it explicitly with tf1.AUTO_REUSE. This creates the
variables for the 'fc1' layer in a global scope called 'shared'
(outside of the Policy's normal variable scope).
"""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
inputs = tf.keras.layers.Input(observation_space.shape)
with tf1.variable_scope(
tf1.VariableScope(tf1.AUTO_REUSE, "shared"),
reuse=tf1.AUTO_REUSE,
auxiliary_name_scope=False,
):
last_layer = tf.keras.layers.Dense(
units=64, activation=tf.nn.relu, name="fc1"
)(inputs)
output = tf.keras.layers.Dense(
units=num_outputs, activation=None, name="fc_out"
)(last_layer)
vf = tf.keras.layers.Dense(units=1, activation=None, name="value_out")(
last_layer
)
self.base_model = tf.keras.models.Model(inputs, [output, vf])
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(input_dict["obs"])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class SharedWeightsModel2(TFModelV2):
"""The "other" TFModelV2 using the same shared space as the one above."""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
inputs = tf.keras.layers.Input(observation_space.shape)
# Weights shared with SharedWeightsModel1.
with tf1.variable_scope(
tf1.VariableScope(tf1.AUTO_REUSE, "shared"),
reuse=tf1.AUTO_REUSE,
auxiliary_name_scope=False,
):
last_layer = tf.keras.layers.Dense(
units=64, activation=tf.nn.relu, name="fc1"
)(inputs)
output = tf.keras.layers.Dense(
units=num_outputs, activation=None, name="fc_out"
)(last_layer)
vf = tf.keras.layers.Dense(units=1, activation=None, name="value_out")(
last_layer
)
self.base_model = tf.keras.models.Model(inputs, [output, vf])
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(input_dict["obs"])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
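# Sharing note (tf1 static graph): because both models above enter the
# "shared" scope with reuse=tf1.AUTO_REUSE, the first model instantiated
# creates variables such as "shared/fc1/kernel" and the second reuses them
# rather than creating fresh copies (variable names here are assumed from
# Keras/tf1 naming conventions).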
TORCH_GLOBAL_SHARED_LAYER = None
if torch:
# The global, shared layer to be used by both models.
TORCH_GLOBAL_SHARED_LAYER = SlimFC(
64,
64,
activation_fn=nn.ReLU,
initializer=torch.nn.init.xavier_uniform_,
)
class TorchSharedWeightsModel(TorchModelV2, nn.Module):
"""Example of weight sharing between two different TorchModelV2s.
The shared (single) layer is simply defined outside of the two Models,
then used by both Models in their forward pass.
"""
def __init__(
self, observation_space, action_space, num_outputs, model_config, name
):
TorchModelV2.__init__(
self, observation_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
# Non-shared initial layer.
self.first_layer = SlimFC(
int(np.product(observation_space.shape)),
64,
activation_fn=nn.ReLU,
initializer=torch.nn.init.xavier_uniform_,
)
# Non-shared final layer.
self.last_layer = SlimFC(
64,
self.num_outputs,
activation_fn=None,
initializer=torch.nn.init.xavier_uniform_,
)
self.vf = SlimFC(
64,
1,
activation_fn=None,
initializer=torch.nn.init.xavier_uniform_,
)
self._global_shared_layer = TORCH_GLOBAL_SHARED_LAYER
self._output = None
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out = self.first_layer(input_dict["obs"])
self._output = self._global_shared_layer(out)
model_out = self.last_layer(self._output)
return model_out, []
@override(ModelV2)
def value_function(self):
assert self._output is not None, "must call forward first!"
return torch.reshape(self.vf(self._output), [-1])
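# Usage sketch (hypothetical spaces and output size): two instances share
# the parameters of TORCH_GLOBAL_SHARED_LAYER, while their first/last
# layers remain private.
#
#   m1 = TorchSharedWeightsModel(obs_space, act_space, 4, {}, "m1")
#   m2 = TorchSharedWeightsModel(obs_space, act_space, 4, {}, "m2")
#   assert m1._global_shared_layer is m2._global_shared_layer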
| apache-2.0 |
cainiaocome/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 243 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
useful in the case of class imbalance, giving a more visual
interpretation of which classes are being misclassified.
Here the results are not as good as they could be because our
choice of the regularization parameter C was not the best.
In real-life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 115 | 1270 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
theoryno3/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 226 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
vinhqdang/wikipedia_analysis | lang_model/enwiki/bi_lstm.py | 1 | 5254 | from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
import csv
import numpy as np
from sklearn import metrics, cross_validation
# import pandas
import tensorflow as tf
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell, GRUCell
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.layers.embedding_ops import embedding
# from tf.nn.rnn_* import rnn, rnn_cell
# import skflow
import random
import time
# import cPickle as pickle
# from nltk.corpus import stopwords
import string
import getopt
import sys
import os
data_dir = "text" # directory contains text documents
model_size = 2000 # length of output vectors
nb_epochs = 10 # number of training epochs
embedding_size = 300
label_file = "enwikilabel"
MAX_FILE_ID = 50000
cell_size = [128]
dropout_ratio = 0.5
dynamic = True
activation_function = "relu"
learning_rate = 0.001
test_ratio = 0.2
cell_type = "lstm"
try:
opts, args = getopt.getopt(sys.argv[1:],"hd:model_size:epoch:lb:es:",["model_size=","epoch=","es=","cell_size=","dropout=","dynamic=","activation=","rate=","test_ratio=","cell_type="])
except getopt.GetoptError as e:
print ("Error of parameters")
print (e)
print (sys.argv[0] + " -h for help")
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print ('LSTM for Wikipedia classification')
print (sys.argv[0] + " -h for help")
sys.exit ()
elif opt in ("-model_size","--model_size"):
model_size = int (arg)
elif opt in ("-epoch","--epoch"):
nb_epochs = int (arg)
elif opt in ["-es","--es"]:
embedding_size = int (arg)
elif opt in ["--cell_size"]:
cell_size = arg.split (",")
cell_size = [int (x) for x in cell_size]
elif opt in ["--dropout"]:
dropout_ratio = float (arg)
elif opt in ["--dynamic"]:
        dynamic = arg.lower() in ("true", "1", "yes")  # bool(arg) was True for any non-empty string
elif opt in ["--activation"]:
activation_function = arg
elif opt in ["--rate"]:
learning_rate = float (arg)
elif opt in ["--test_ratio"]:
test_ratio = float (arg)
elif opt in ["--cell_type"]:
if (arg != "lstm" and arg != "gru"):
print ("Wrong cell type. Accept only lstm or gru.")
sys.exit(1)
cell_type = arg
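# Example invocation (script name and values assumed, for illustration):
#   python bi_lstm.py --model_size 2000 --epoch 10 --es 300 \
#       --cell_size 128,64 --dropout 0.5 --rate 0.001 --cell_type gru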
### Training data
qualities = ["stub","start","c","b","ga","fa"]
print('Read labels')
def load_label (label_file):
with open (label_file) as f:
return f.read().splitlines()
Y = load_label(label_file)
for i in range(len(Y)):
Y[i] = qualities.index(Y[i])
print('Read content')
def load_content (file_name):
with open(file_name) as f:
return f.read()
X = []
for i in range (MAX_FILE_ID):
file_name = data_dir + '/' + str(i + 1)
if os.path.isfile (file_name):
X.append (load_content(file_name))
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y,
test_size=test_ratio, random_state=2017)
Y_train = to_categorical (Y_train, nb_classes = len (qualities))
Y_test = to_categorical (Y_test, nb_classes = len (qualities))
### Process vocabulary
print('Process vocabulary')
vocab_processor = tflearn.data_utils.VocabularyProcessor(max_document_length = model_size, min_frequency = 0)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))  # reuse the training vocabulary
X_train = pad_sequences(X_train, maxlen=model_size, value=0.)
X_test = pad_sequences(X_test, maxlen=model_size, value=0.)
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# pickle.dump (X_train, open ("xtrain.p", b))
# pickle.dump (X_test, open ("xtest.p", b))
# X_train = pickle.load (open ("xtrain.p", rb))
# X_test = pickle.load (open ("xtest.p", rb))
### Models
print('Build model')
net = input_data([None, model_size])
net = embedding(net, input_dim=n_words, output_dim=embedding_size)
if cell_type == "lstm":
for i in range (len (cell_size)):
if i < len(cell_size) - 1:
net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]), BasicLSTMCell(cell_size[i]), return_seq = True)
net = dropout(net, dropout_ratio)
else:
net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]), BasicLSTMCell(cell_size[i]))
net = dropout(net, dropout_ratio)
elif cell_type == "gru":
for i in range (len (cell_size)):
if i < len(cell_size) - 1:
net = bidirectional_rnn(net, GRUCell(cell_size[i]), GRUCell(cell_size[i]), return_seq = True)
net = dropout(net, dropout_ratio)
else:
net = bidirectional_rnn(net, GRUCell(cell_size[i]), GRUCell(cell_size[i]))
net = dropout(net, dropout_ratio)
net = fully_connected(net, len (qualities), activation='softmax')
net = regression(net, optimizer='adam', learning_rate=learning_rate,
loss='categorical_crossentropy')
print ('Train model')
model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir = "logdir/bi_lstm")
print ('Predict')
model.fit(X_train, Y_train, validation_set=(X_test, Y_test), show_metric=True,
          batch_size=32, n_epoch=nb_epochs)
| gpl-2.0 |
UniMOOC/gcb-new-module | modules/data_pump/data_pump.py | 3 | 61410 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable periodic transmission of DB and job-produced content to BigQuery."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import base64
import collections
import copy
import datetime
import logging
import os
import random
import re
import time
import urllib
import apiclient
import apiclient.discovery
import httplib2
import oauth2client
import oauth2client.client
from common import catch_and_log
from common import crypto
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import roles
from models import transforms
from modules.dashboard import dashboard
from modules.dashboard import tabs
from google.appengine.ext import db
from google.appengine.ext import deferred
# CourseBuilder setup strings
XSRF_ACTION_NAME = 'data_pump'
DASHBOARD_ACTION = 'data_pump'
# Separate permission for pushing user data, delegable to non-super-users
ACCESS_PERMISSION = 'push_data'
ACCESS_PERMISSION_DESCRIPTION = 'Can push user data outside CourseBuilder.'
# Connection parameters for discovering and auth to BigQuery.
BIGQUERY_RW_SCOPE = 'https://www.googleapis.com/auth/bigquery'
BIGQUERY_API_NAME = 'bigquery'
BIGQUERY_API_VERSION = 'v2'
# API endpoint for initiating a retryable upload.
BIGQUERY_API_UPLOAD_URL_PREFIX = (
'https://www.googleapis.com/upload/bigquery/v2/projects/')
# UI for BigQuery interactive queries
BIGQUERY_UI_URL_PREFIX = 'https://bigquery.cloud.google.com/table/'
# Max of about 20 min of retries (random exponential backoff from 2^1...2^MAX)
MAX_CONSECUTIVE_FAILURES = 10
MAX_RETRY_BACKOFF_SECONDS = 600
# Config for secret
PII_SECRET_LENGTH = 20
PII_SECRET_DEFAULT_LIFETIME = '30 days'
# Constants for accessing job context settings map
UPLOAD_URL = 'upload_url'
LAST_START_OFFSET = 'last_start_offset'
LAST_END_OFFSET = 'last_end_offset'
LAST_PAGE_SENT = 'last_page_sent'
LAST_PAGE_NUM_ITEMS = 'last_page_num_items'
CONSECUTIVE_FAILURES = 'consecutive_failures'
FAILURE_REASON = 'failure_reason'
ITEMS_UPLOADED = 'items_uploaded'
PII_SECRET = 'pii_secret'
# Constants for items within course settings schema
DATA_PUMP_SETTINGS_SCHEMA_SECTION = 'data_pump'
PROJECT_ID = 'project_id'
DATASET_NAME = 'dataset_name'
JSON_KEY = 'json_key'
TABLE_LIFETIME = 'table_lifetime'
PII_ENCRYPTION_TOKEN = 'pii_encryption_token'
# Discovery service lookup retries constants
DISCOVERY_SERVICE_MAX_ATTEMPTS = 10
DISCOVERY_SERVICE_RETRY_SECONDS = 2
def _get_data_source_class_by_name(name):
source_classes = data_sources.Registry.get_rest_data_source_classes()
for source_class in source_classes:
if source_class.__name__ == name and source_class.exportable():
return source_class
names = [source_class.__name__ for source_class in source_classes]
logging.critical(
'No entry found for data source class with name "%s". '
'Available names are: %s', name, ' '.join(names))
return None
class DataPumpJob(jobs.DurableJobBase):
@staticmethod
def get_description():
"""Job to push data from CourseBuilder to BigQuery.
The job operates from the deferred queue, and takes advantage of the
underlying TaskQueue retry and backoff support. One job is created
for each DataSource (see models/data_source). This job moves data
from the paginated data source up to Google BigQuery via the
retryable POST method.
        Jobs here run on the TaskQueue named "default" along with all other
CB deferred tasks because that queue has a reasonable set of config
parameters. However, there is nothing about these jobs that
requires interleaving with others if queue parameters need to be
tuned. Functional tests will need to be changed to have
execute_all_deferred_tasks() pass the name of the new queue.
"""
def __init__(self, app_context, data_source_class_name,
no_expiration_date=False, send_uncensored_pii_data=False):
if not _get_data_source_class_by_name(data_source_class_name):
raise ValueError(
'No such data source "%s", or data source is not marked '
'as exportable.' % data_source_class_name)
super(DataPumpJob, self).__init__(app_context)
self._data_source_class_name = data_source_class_name
self._job_name = 'job-datapump-%s-%s' % (self._data_source_class_name,
self._namespace)
self._no_expiration_date = no_expiration_date
self._send_uncensored_pii_data = send_uncensored_pii_data
def non_transactional_submit(self):
"""Callback used when UI gesture indicates this job should start."""
sequence_num = super(DataPumpJob, self).non_transactional_submit()
deferred.defer(self.main, sequence_num)
return sequence_num
def _mark_job_canceled(self, job, message, duration):
"""Override default behavior of setting job.output to error string."""
if job.output:
job_context, data_source_context = self._load_state(
job, job.sequence_num)
else:
job_context = self._build_job_context(None, None)
data_source_context = self._build_data_source_context()
job_context[FAILURE_REASON] = message
self._save_state(jobs.STATUS_CODE_FAILED, job, job.sequence_num,
job_context, data_source_context,
use_transaction=False)
def _build_data_source_context(self):
"""Set up context class specific to data source type we pull from."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
# TODO(mgainer): if we start getting timeout failures, perhaps learn
# proper chunk size from history, rather than using default.
default_chunk_size = data_source_class.get_default_chunk_size()
ret = context_class.build_blank_default({}, default_chunk_size)
if hasattr(ret, 'send_uncensored_pii_data'):
ret.send_uncensored_pii_data = self._send_uncensored_pii_data
return ret
def _build_job_context(self, upload_url, pii_secret):
"""Set up context object used to maintain this job's internal state."""
job_context = {
UPLOAD_URL: upload_url,
LAST_START_OFFSET: 0,
LAST_END_OFFSET: -1,
LAST_PAGE_SENT: -1,
LAST_PAGE_NUM_ITEMS: 0,
CONSECUTIVE_FAILURES: [],
FAILURE_REASON: '',
ITEMS_UPLOADED: 0,
PII_SECRET: pii_secret,
}
return job_context
def _load_state(self, job, sequence_num):
if job.sequence_num != sequence_num:
raise ValueError(
'Abandoning stale job with sequence %d; '
'there is a new job with sequence %d running.' % (
sequence_num, job.sequence_num))
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
content = transforms.loads(job.output)
job_context = content['job_context']
data_source_context_class = data_source_class.get_context_class()
data_source_context = data_source_context_class.build_from_dict(
content['data_source_context'])
return job_context, data_source_context
def _save_state(self, state, job, sequence_num, job_context,
data_source_context, use_transaction=True):
# Job context may have been made with blank values for these two items.
# Recover them from the previous context if they are not set (and if
# the previous context is present enough to have them)
try:
prev_job_context, _ = self._load_state(job, sequence_num)
if not job_context[PII_SECRET]:
job_context[PII_SECRET] = prev_job_context[PII_SECRET]
if not job_context[UPLOAD_URL]:
job_context[UPLOAD_URL] = prev_job_context[UPLOAD_URL]
except (ValueError, AttributeError):
pass
# Convert data source context object to plain dict.
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
data_source_context_dict = context_class.save_to_dict(
data_source_context)
# Set job object state variables.
now = datetime.datetime.now()
job.output = transforms.dumps({
'job_context': job_context,
'data_source_context': data_source_context_dict,
})
job.status_code = state
job.execution_time_sec += int((now - job.updated_on).total_seconds())
job.updated_on = now
logging.info('Data pump job %s saving contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Using _update in DurableJobEntity
# pylint: disable=protected-access
if use_transaction:
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(
xg_on, jobs.DurableJobEntity._update, self._job_name,
sequence_num, job.status_code, job.output,
job.execution_time_sec)
else:
jobs.DurableJobEntity._update(self._job_name, sequence_num,
job.status_code, job.output,
job.execution_time_sec)
@classmethod
def _parse_pii_encryption_token(cls, token):
parts = token.split('/')
return (parts[0],
datetime.datetime(year=1970, month=1, day=1) +
datetime.timedelta(seconds=int(parts[1])))
@classmethod
def _is_pii_encryption_token_valid(cls, token):
try:
_, valid_until_date = cls._parse_pii_encryption_token(token)
return valid_until_date > datetime.datetime.now()
except ValueError:
return False
@classmethod
def _build_new_pii_encryption_token(cls, timedelta_string):
hmac_secret = base64.urlsafe_b64encode(
os.urandom(int(PII_SECRET_LENGTH * 0.75)))
table_lifetime_seconds = common_utils.parse_timedelta_string(
timedelta_string).total_seconds()
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
now = datetime.datetime.now()
table_lifetime_timedelta = datetime.timedelta(
seconds=table_lifetime_seconds)
valid_until_timestamp = int(
(now - unix_epoch + table_lifetime_timedelta).total_seconds())
pii_encryption_token = '%s/%d' % (hmac_secret,
valid_until_timestamp)
return pii_encryption_token
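    # Shape of the token built above (hypothetical values): a urlsafe-base64
    # secret, a '/' separator, then a Unix-epoch expiry timestamp, e.g.
    #     "c2VjcmV0c2VjcmV0c2VjcmV0/1735689600"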
@classmethod
def _get_pii_token(cls, app_context):
"""Retrieve or generate and save a secret used to encrypt exported PII.
All PII data in objects exported to BigQuery is either suppressed
or transformed via a one-way hash using a secret value. The point
of the transformation is so that exported data cannot trivially be
correlated to any individual's data in CourseBuilder, but records
in exported data encoded using the same key can. (E.g., a user_id
is the key for students; this key should be usable to correlate a
user's language preference with his test scores.)
Once data has been exported from CourseBuilder to BigQuery, the
internal permissions from CourseBuilder no longer apply. To minimize
the ability of those with access to the data to perform long-term
correlations that might identify individuals, the secret used to
encode PII is automatically rotated on a period determined by the
course settings. We re-use the expiration period for tables, or
default to 30 days if no period is selected.
The format for the stored setting is a string composed of:
- A randomly-generated secret encoded as a base-64 string
- A slash character ('/')
- A Unix timestamp indicating the expiration date of the token.
The expiration date approach is chosen so that within the expiration
period, different data sources can be re-exported multiple times, but
still correlated with one another in BigQuery. Upon expiration, a
new token is generated and used. Data exported before and after the
changeover cannot be directly correlated. (It may be possible to
force a correlation if old versions of the data tables were downloaded
by comparing non-key fields in the old/new versions, if the non-key
fields are sufficiently discriminative)
Args:
app_context: Standard CB application context object.
Returns:
Secret string used for encoding PII data upon export.
"""
course_settings = app_context.get_environ()
pump_settings = course_settings.get(DATA_PUMP_SETTINGS_SCHEMA_SECTION,
{})
pii_encryption_token = pump_settings.get(PII_ENCRYPTION_TOKEN)
if (not pii_encryption_token or
not cls._is_pii_encryption_token_valid(pii_encryption_token)):
# If table_lifetime is missing OR is set to the empty string,
# prefer the default value.
lifetime = (pump_settings.get(TABLE_LIFETIME) or
PII_SECRET_DEFAULT_LIFETIME)
pii_encryption_token = cls._build_new_pii_encryption_token(lifetime)
pump_settings[PII_ENCRYPTION_TOKEN] = pii_encryption_token
course = courses.Course(None, app_context=app_context)
course.save_settings(course_settings)
return pii_encryption_token
@classmethod
def _get_pii_secret(cls, app_context):
secret, _ = cls._parse_pii_encryption_token(
cls._get_pii_token(app_context))
return secret
def _get_bigquery_settings(self, app_context):
"""Pull settings necessary for using BigQuery from DB.
This is nice and verbose and paranoid, so that if there is any
misconfiguration, the end-user gets a nice message that's specific
about the particular problem, rather than just a KeyError or
ValueError.
Args:
app_context: The standard app context for the course in question.
Returns:
A namedtuple containing private_key, client_email, project_id
and dataset_id members. The first three are required to connect
to BigQuery, and the last is the dataset within BigQuery to
which the data pump will restrict itself for insert/write/delete
operations.
Raises:
ValueError: if any expected element is missing or malformed.
"""
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
dataset_id = (
pump_settings.get(DATASET_NAME) or
re.sub('[^0-9a-z_:-]', '', app_context.get_slug().lower()) or
'course')
project_id = pump_settings.get(PROJECT_ID)
if not project_id:
raise ValueError('Cannot pump data without a course settings value '
'for the target Google BigQuery project ID')
json_key = pump_settings.get(JSON_KEY)
if not json_key:
raise ValueError('Cannot pump data without a JSON client key '
'allowing access to the target Google BigQuery '
'project')
try:
json_key = transforms.loads(json_key)
except ValueError:
raise ValueError('Cannot decode JSON client key for the target '
'Google BigQuery project.')
if 'private_key' not in json_key or 'client_email' not in json_key:
raise ValueError('The JSON client key for the target Google '
'BigQuery project does not seem to be well '
'formed; either the "private_key" or '
'"client_email" field is missing.')
# If table_lifetime setting is missing OR is set to the empty string,
# prefer the default value.
table_lifetime_seconds = common_utils.parse_timedelta_string(
pump_settings.get(TABLE_LIFETIME) or PII_SECRET_DEFAULT_LIFETIME
).total_seconds()
Settings = collections.namedtuple('Settings', [
'private_key', 'client_email', PROJECT_ID, 'dataset_id',
'table_lifetime_seconds'])
return Settings(json_key['private_key'], json_key['client_email'],
project_id, dataset_id, table_lifetime_seconds)
def _get_bigquery_service(self, bigquery_settings):
"""Get BigQuery API client plus HTTP client with auth credentials."""
credentials = oauth2client.client.SignedJwtAssertionCredentials(
bigquery_settings.client_email, bigquery_settings.private_key,
BIGQUERY_RW_SCOPE)
http = httplib2.Http()
http = credentials.authorize(http)
# Discovery.build has a timeout that's a little too aggressive. Since
# this happens before we even have our job_context built, any errors
# returned from here will be fatal. Since that's the case, add some
# extra forgiveness here by retrying several times, with a little bit
# of wait thrown in to allow the discovery service to recover, in case
# it really is just having a bad few moments.
attempts = 0
while True:
try:
return apiclient.discovery.build(
BIGQUERY_API_NAME, BIGQUERY_API_VERSION, http=http), http
# pylint: disable=broad-except
except Exception, ex:
attempts += 1
if attempts >= DISCOVERY_SERVICE_MAX_ATTEMPTS:
raise
logging.warning(
'Ignoring HTTP connection timeout %d of %d',
attempts, DISCOVERY_SERVICE_MAX_ATTEMPTS)
time.sleep(DISCOVERY_SERVICE_RETRY_SECONDS)
def _maybe_create_course_dataset(self, service, bigquery_settings):
"""Create dataset within BigQuery if it's not already there."""
datasets = service.datasets()
try:
datasets.get(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
datasets.insert(projectId=bigquery_settings.project_id,
body={
'datasetReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id
}}).execute()
def _maybe_delete_previous_table(self, tables, bigquery_settings,
data_source_class):
"""Delete previous version of table for data source, if it exists."""
# TODO(mgainer): Make clobbering old table and replacing optional.
# For now, we assume people will be writing queries in terms of
# a single table name, and will be irritated at having to change
# their queries all the time if we add a timestamp to the table
# name. And no, AFAICT, the BigQuery API does not permit renaming
# of tables, just creation and deletion.
table_name = data_source_class.get_name()
try:
tables.delete(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
tableId=table_name).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
def _json_schema_member_to_bigquery_schema(self, name, structure):
item = {'name': name}
if 'description' in structure:
item['description'] = structure['description']
if 'properties' in structure: # It's a sub-registry.
item['type'] = 'RECORD'
item['mode'] = 'NULLABLE'
item['fields'] = self._json_schema_to_bigquery_schema(
structure['properties'])
elif 'items' in structure: # It's an array
if 'items' in structure['items']:
raise ValueError(
'BigQuery schema descriptions do not support nesting '
'arrays directly in other arrays. Instead, nest '
'structures in arrays; those structures may contain '
'sub-arrays. Problem arises trying to pump data for %s' %
self._data_source_class_name)
item = self._json_schema_member_to_bigquery_schema(
name, structure['items'])
item['mode'] = 'REPEATED'
else:
item['mode'] = ('NULLABLE' if structure.get('optional')
else 'REQUIRED')
if structure['type'] in ('string', 'text', 'html', 'url', 'file'):
item['type'] = 'STRING'
elif structure['type'] in 'integer':
item['type'] = 'INTEGER'
elif structure['type'] in 'number':
item['type'] = 'FLOAT'
elif structure['type'] in 'boolean':
item['type'] = 'BOOLEAN'
elif structure['type'] in ('date', 'datetime', 'timestamp'):
# BigQuery will accept ISO-formatted datetimes as well as
# integer seconds-since-epoch as timestamps.
item['type'] = 'TIMESTAMP'
else:
raise ValueError(
'Unrecognized schema scalar type "%s" '
'when trying to make schema for data-pumping %s' % (
structure['type'], self._data_source_class_name))
return item
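    # Example of the mapping above (a sketch; the field name is assumed):
    # a JSON schema member
    #     'score': {'type': 'integer', 'optional': True}
    # becomes the BigQuery field
    #     {'name': 'score', 'mode': 'NULLABLE', 'type': 'INTEGER'}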
def _json_schema_to_bigquery_schema(self, json_schema_dict):
fields = []
for name, structure in json_schema_dict.iteritems():
fields.append(self._json_schema_member_to_bigquery_schema(
name, structure))
return fields
def _create_data_table(self, tables, bigquery_settings, schema,
data_source_class):
"""Instantiate and provide schema for new BigQuery table."""
table_name = data_source_class.get_name()
request = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'schema': {'fields': schema}
}
# If user has requested it, set the time at which table should be
# reclaimed (as milliseconds since Unix epoch).
if (bigquery_settings.table_lifetime_seconds and
not self._no_expiration_date):
now = datetime.datetime.utcnow()
expiration_delta = datetime.timedelta(
seconds=bigquery_settings.table_lifetime_seconds)
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
expiration_ms = int(
(now + expiration_delta - unix_epoch).total_seconds()) * 1000
request['expirationTime'] = expiration_ms
# Allow exceptions from here to propagate; we don't expect any problems,
# so if we have any, the upload should abort.
tables.insert(
projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
body=request).execute()
def _create_upload_job(self, http, bigquery_settings, data_source_class):
"""Before uploading, we must create a job to handle the upload.
Args:
http: An HTTP client object configured to send our auth token
bigquery_settings: Configs for talking to bigquery.
Returns:
URL specific to this upload job. Subsequent PUT requests to send
pages of data must be sent to this URL.
Raises:
Exception: on unexpected responses from BigQuery API.
"""
uri = '%s%s/jobs?uploadType=resumable' % (
BIGQUERY_API_UPLOAD_URL_PREFIX, bigquery_settings.project_id)
headers = {
'Content-Type': 'application/json',
'X-Upload-Content-Type': 'application/octet-stream',
}
table_name = data_source_class.get_name()
body = transforms.dumps({
'kind': 'bigquery#job',
'configuration': {
'load': {
'createDisposition': 'CREATE_NEVER', # Already exists.
'destinationTable': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'ignoreUnknownValues': False,
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
}
}
})
response, content = http.request(uri, method='POST',
body=body, headers=headers)
if int(response.get('status', 0)) != 200:
raise Exception('Got non-200 response when trying to create a '
                            'new upload job. Response was: "%s"; content '
'was "%s"' % (str(response), str(content)))
location = response.get('location')
if not location:
raise Exception('Expected response to contain a "location" item '
'giving a URL to send subsequent content to, but '
'instead got "%s"' % str(response))
return location
def _initiate_upload_job(self, bigquery_service, bigquery_settings, http,
app_context, data_source_context):
"""Coordinate table cleanup, setup, and initiation of upload job."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
table_schema = data_source_class.get_schema(app_context, catch_and_log_,
data_source_context)
schema = self._json_schema_to_bigquery_schema(table_schema)
tables = bigquery_service.tables()
self._maybe_create_course_dataset(bigquery_service, bigquery_settings)
self._maybe_delete_previous_table(tables, bigquery_settings,
data_source_class)
self._create_data_table(tables, bigquery_settings, schema,
data_source_class)
upload_url = self._create_upload_job(http, bigquery_settings,
data_source_class)
return upload_url
def _note_retryable_failure(self, message, job_context):
"""Log a timestamped message into the job context object."""
timestamp = datetime.datetime.now().strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
job_context[CONSECUTIVE_FAILURES].append(timestamp + ' ' + message)
def _randomized_backoff_timeout(self, job_context):
num_failures = len(job_context[CONSECUTIVE_FAILURES])
if not num_failures:
return 0
return min(MAX_RETRY_BACKOFF_SECONDS,
random.randrange(2 ** num_failures, 2 ** (num_failures + 1)))
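    # For example, with 3 consecutive failures the delay above is drawn
    # uniformly from [2**3, 2**4) = [8, 16) seconds; it is always capped
    # at MAX_RETRY_BACKOFF_SECONDS (600).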
def _check_upload_state(self, http, job_context):
"""Check with the BigQuery upload server to get state of our upload.
Due to various communication failure cases, we may not be aware of
the actual state of the upload as known to the server. Issue a blank
PUT request to evoke a response that will indicate:
- How far along we are in the upload
- Whether the upload has already completed
- Whether the upload job has taken too long and expired
Args:
http: An HTTP client object configured to send our auth token
job_context: Hash containing configuration for this upload job.
Returns:
A 2-tuple of next page to load (or None if no page should be
loaded), and the next jobs.STATUS_CODE_<X> to transition to.
"""
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
headers={'Content-Range': 'bytes */*'})
return self._handle_put_response(response, job_context, is_upload=False)
def _send_data_page_to_bigquery(self, data, is_last_chunk, next_page,
http, job, sequence_num, job_context,
data_source_context):
if next_page == 0 and is_last_chunk and not data:
return jobs.STATUS_CODE_COMPLETED
# BigQuery expects one JSON object per newline-delimed record,
# not a JSON array containing objects, so convert them individually.
# Less efficient, but less hacky than converting and then string
# manipulation.
lines = []
total_len = 0
for item in data:
line = transforms.dumps(item)
line += '\n'
total_len += len(line)
lines.append(line)
# Round data size up to next multiple of 256K, per
# https://cloud.google.com/bigquery/loading-data-post-request#chunking
padding_amount = 0
if not is_last_chunk:
round_to = 256 * 1024
if total_len % round_to:
padding_amount = round_to - (total_len % round_to)
lines.append(' ' * padding_amount)
payload = ''.join(lines)
# We are either re-attempting to send a page, or sending a new page.
# Adjust the job_context's last-sent state to reflect this.
job_context[LAST_PAGE_NUM_ITEMS] = len(data)
if next_page == job_context[LAST_PAGE_SENT]:
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
elif next_page == job_context[LAST_PAGE_SENT] + 1:
job_context[LAST_PAGE_SENT] = next_page
job_context[LAST_START_OFFSET] = (
job_context[LAST_END_OFFSET] + 1)
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
else:
raise Exception(
'Internal error - unexpected condition in sending page. '
'next_page=%d last_page=%d, num_items=%d' % (
next_page, job_context[LAST_PAGE_SENT], len(data)))
logging.info(
'Sending to BigQuery. %d items; %d padding bytes; is-last: %s',
len(data), padding_amount, str(is_last_chunk))
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
(job_context[LAST_END_OFFSET] + 1) if is_last_chunk else '*')
}
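        # For instance, a first, non-final page padded to 256K sends
        # "Content-Range: bytes 0-262143/*"; the last page replaces '*'
        # with the total number of bytes uploaded.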
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
body=payload, headers=headers)
_, next_state = self._handle_put_response(response, job_context,
is_upload=True)
return next_state
def _handle_put_response(self, response, job_context, is_upload=True):
"""Update job_context state depending on response from BigQuery."""
status = int(response['status'])
logging.info('Response from bigquery: %d; %s', status, str(response))
next_page = None
next_status = jobs.STATUS_CODE_STARTED
if status == 308:
# Google's push-partial-data usurps the usual meaning of 308 to
# instead mean "partial request incomplete"; here, it's telling
# us that the request has partially completed, and it will give
# us a Range: header to indicate how far it thinks we've gone.
# We only care about the upper end of the range.
if 'range' not in response:
last_offset_received = -1
else:
last_offset_received = int(response['range'].split('-')[1])
if last_offset_received == job_context[LAST_END_OFFSET]:
# The nominal case; the reported index of the last byte
# received exactly matches what we think we sent. Tell our
# caller we are ready to try the next page, and count up
# the total number of items sent only now that we have seen
# the receiving side's acknowledgement.
next_page = job_context[LAST_PAGE_SENT] + 1
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
# Don't clear the list of failures if this is handling the
# pre-check done before uploading. Experiments show that
# persistent problems with our requests result in 503's on
# upload, but 308's (reporting no progress made) on check.
# We want to eventually fail out if we're constantly getting
# errors, so ignore the "success" on checking status.
if is_upload:
job_context[CONSECUTIVE_FAILURES] = []
elif (last_offset_received >= job_context[LAST_START_OFFSET] - 1 and
last_offset_received < job_context[LAST_END_OFFSET]):
# If the last offset received is not the same as the last offset
# sent, that's possibly OK; verify that the last offset received
# is sane. Here, "sane" means that we accept seeing the
# last offset of the previous page sent (last_start_offset-1)
# up to, but not including the last_end_offset (for the page
# we just sent). Anything lower means that our algorithm
# mistakenly skipped past a failure. Anything higher means
# that we have somehow become confused and decided to step
# backward (or BigQuery is lying to us).
prev_page_size = (job_context[LAST_END_OFFSET] -
job_context[LAST_START_OFFSET] + 1)
bytes_received = (last_offset_received -
job_context[LAST_START_OFFSET] + 1)
self._note_retryable_failure(
'Incomplete upload detected - %d of %d bytes received '
'for page %d' %
(bytes_received, prev_page_size,
job_context[LAST_PAGE_SENT]), job_context)
next_page = job_context[LAST_PAGE_SENT]
else:
raise ValueError(
'Uploaded byte count of %d does not fall in the range '
'%d to %d, the start/end range for previously-sent page '
'number %d. Abandoning upload.' % (
last_offset_received, job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
job_context[LAST_PAGE_SENT]))
elif status in (200, 201):
# BigQuery confirms that it has seen the upload complete. (Note
# that this is *not* a promise that the upload has parsed
# correctly; there doesn't seem to be a clean way to ask about
# that other than to probe the table for number of rows uploaded
# until we see the desired number or time out. Ick.)
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
next_status = jobs.STATUS_CODE_COMPLETED
elif status == 404:
# Unlikely, but possible. For whatever reason, BigQuery has
# decided that our upload URL is no longer valid. (Docs say that
# we are allowed up to a day to get an upload done, but do not
# promise that this is the only reason a job may become invalid.)
# We need to start again from scratch. To start over, we will
# just skip uploading a data page this round, and set ourselves up
# to be called back again from the deferred-tasks queue. When the
# callback happens, STATUS_CODE_QUEUED will indicate we need to
# re-init everything from scratch.
next_status = jobs.STATUS_CODE_QUEUED
elif status in (500, 502, 503, 504):
# Server Error, Bad Gateway, Service Unavailable or Gateway Timeout.
# In all of these cases, we do a randomized exponential delay before
# retrying.
self._note_retryable_failure('Retryable server error %d' % status,
job_context)
else:
raise ValueError(
'Got unexpected status code %d from BigQuery in response %s' %
(status, str(response)))
return next_page, next_status
def _fetch_page_data(self, app_context, data_source_context, next_page):
"""Get the next page of data from the data source."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
is_last_page = False
with catch_and_log_.propagate_exceptions('Loading page of data'):
schema = data_source_class.get_schema(app_context, catch_and_log_,
data_source_context)
required_jobs = data_sources.utils.get_required_jobs(
data_source_class, app_context, catch_and_log_)
data, _ = data_source_class.fetch_values(
app_context, data_source_context, schema, catch_and_log_,
next_page, *required_jobs)
# BigQuery has a somewhat unfortunate design: It does not attempt
# to parse/validate the data we send until all data has been
# uploaded and the upload has been declared a "success". Rather
# than having to poll for an indefinite amount of time until the
# upload is parsed, we validate that the sent items exactly match
# the declared schema. Somewhat expensive, but better than having
# completely unreported hidden failures.
for index, item in enumerate(data):
complaints = transforms.validate_object_matches_json_schema(
item, schema)
if complaints:
raise ValueError(
'Data in item to pump does not match schema! ' +
'Item is item number %d ' % index +
'on data page %d. ' % next_page +
'Problems for this item are:\n' +
'\n'.join(complaints))
if (data_source_class.get_default_chunk_size() == 0 or
not hasattr(data_source_context, 'chunk_size') or
len(data) < data_source_context.chunk_size):
is_last_page = True
else:
# Here, we may have read to the end of the table and just
# happened to end up on an even chunk boundary. Attempt to
# read one more row so that we can discern whether we really
# are at the end.
# Don't use the normal data_source_context; we don't want it
# to cache a cursor for the next page that will only retrieve
# one row.
throwaway_context = copy.deepcopy(data_source_context)
throwaway_context.chunk_size = 1
next_data, actual_page = data_source_class.fetch_values(
app_context, throwaway_context, schema, catch_and_log_,
next_page + 1, *required_jobs)
if not next_data or actual_page == next_page:
is_last_page = True
return data, is_last_page
def _send_next_page(self, sequence_num, job):
"""Coordinate table setup, job setup, sending pages of data."""
# Gather necessary resources
app_context = sites.get_course_index().get_app_context_for_namespace(
self._namespace)
pii_secret = self._get_pii_secret(app_context)
bigquery_settings = self._get_bigquery_settings(app_context)
bigquery_service, http = self._get_bigquery_service(bigquery_settings)
# If this is our first call after job start (or we have determined
# that we need to start over from scratch), do initial setup.
# Otherwise, re-load context objects from saved version in job.output
if job.status_code == jobs.STATUS_CODE_QUEUED:
data_source_context = self._build_data_source_context()
upload_url = self._initiate_upload_job(
bigquery_service, bigquery_settings, http, app_context,
data_source_context)
job_context = self._build_job_context(upload_url, pii_secret)
else:
job_context, data_source_context = self._load_state(
job, sequence_num)
if hasattr(data_source_context, 'pii_secret'):
data_source_context.pii_secret = pii_secret
if self._send_uncensored_pii_data:
data_source_context.send_uncensored_pii_data = True
logging.info('Data pump job %s loaded contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Check BigQuery's state. Based on that, choose the next page of data
# to push. Depending on BigQuery's response, we may or may not be
# able to send a page now.
next_page, next_state = self._check_upload_state(http, job_context)
if next_page is not None:
data, is_last_chunk = self._fetch_page_data(
app_context, data_source_context, next_page)
next_state = self._send_data_page_to_bigquery(
data, is_last_chunk, next_page,
http, job, sequence_num, job_context, data_source_context)
self._save_state(next_state, job, sequence_num, job_context,
data_source_context)
# If we are not done, enqueue another to-do item on the deferred queue.
if len(job_context[CONSECUTIVE_FAILURES]) >= MAX_CONSECUTIVE_FAILURES:
raise Exception('Too many consecutive failures; abandoning job.')
elif not job.has_finished:
backoff_seconds = self._randomized_backoff_timeout(job_context)
logging.info('%s re-queueing for subsequent work', self._job_name)
deferred.defer(self.main, sequence_num, _countdown=backoff_seconds)
else:
logging.info('%s complete', self._job_name)
def main(self, sequence_num):
"""Callback entry point. Manage namespaces, failures; send data."""
logging.info('%s de-queued and starting work.', self._job_name)
job = self.load()
if not job:
raise deferred.PermanentTaskFailure(
'Job object for %s not found!' % self._job_name)
if job.has_finished:
return # We have been canceled; bail out immediately.
with common_utils.Namespace(self._namespace):
try:
self._send_next_page(sequence_num, job)
except Exception as ex:
common_utils.log_exception_origin()
logging.critical('%s: job abandoned due to fatal error %s',
self._job_name, str(ex))
# Log failure in job object as well.
if job.output:
job_context, data_source_context = self._load_state(
job, sequence_num)
else:
job_context = self._build_job_context(None, None)
data_source_context = self._build_data_source_context()
job_context[FAILURE_REASON] = str(ex)
self._save_state(jobs.STATUS_CODE_FAILED, job, sequence_num,
job_context, data_source_context)
# PermanentTaskFailure tells deferred queue to give up on us.
raise deferred.PermanentTaskFailure('Job %s failed: %s' % (
self._job_name, str(ex)))
def get_display_dict(self, app_context):
"""Set up dict for Jinja rendering on data_pump.html."""
data_source_context = self._build_data_source_context()
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
ret = {
'name': self._data_source_class_name,
'title': data_source_class.get_title(),
'status': 'Has Never Run',
'active': False,
}
job = self.load()
if job:
ret['status'] = jobs.STATUS_CODE_DESCRIPTION[job.status_code]
ret['active'] = not job.has_finished
ret['sequence_number'] = job.sequence_num
ret['updated_on'] = job.updated_on.strftime(
utils.HUMAN_READABLE_TIME_FORMAT)
if job.has_finished:
duration = job.execution_time_sec
else:
duration = int((datetime.datetime.now() -
job.updated_on).total_seconds())
ret['duration'] = datetime.timedelta(days=0, seconds=duration)
ret['last_updated'] = job.updated_on.strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
bigquery_settings = self._get_bigquery_settings(app_context)
ret['bigquery_url'] = '%s%s:%s.%s' % (
BIGQUERY_UI_URL_PREFIX, bigquery_settings.project_id,
bigquery_settings.dataset_id, data_source_class.get_name())
try:
job_context, data_source_context = self._load_state(
job, job.sequence_num)
ret['job_context'] = job_context
current_secret = DataPumpJob._get_pii_secret(app_context)
if job_context[PII_SECRET] != current_secret:
ret['pii_secret_is_out_of_date'] = True
del job_context[PII_SECRET]
except (ValueError, AttributeError):
# When jobs framework catches a failure, it overwrites the
# job.output with the failure message as a string. We will
# get here if we fail to parse job.output as a JSON-packed
# object.
ret['message'] = job.output
ret['source_url'] = '%s/rest/data/%s/items?chunk_size=10' % (
app_context.get_slug(), data_source_class.get_name())
catch_and_log_ = catch_and_log.CatchAndLog()
ret['schema'] = data_source_class.get_schema(
app_context, catch_and_log_, data_source_context)
ret['generator_statuses'] = []
ret['available'] = True
ret['any_generator_running'] = False
required_generators = data_source_class.required_generators()
if not required_generators:
ret['generator_statuses'].append(
{'message': '(No dependencies)', 'link': None})
ret['has_any_generators'] = False
else:
ret['has_any_generators'] = True
for generator_class in required_generators:
generator = generator_class(app_context)
job = generator.load()
message = analytics.display.get_generator_status_message(
generator_class, job)
link = analytics.display.get_pipeline_link(
crypto.XsrfTokenManager, app_context, generator_class, job)
ret['generator_statuses'].append({'message': message, 'link': link})
if not job or job.status_code != jobs.STATUS_CODE_COMPLETED:
ret['available'] = False
if job and not job.has_finished:
ret['any_generator_running'] = True
return ret
class DataPumpJobsDataSource(data_sources.SynchronousQuery):
"""Present DataPump job status as an analytic generated at page-render time.
This is a very mild hack. Since the data pump job controls show up as a
sub-tab under Dashboard -> Analytics, the easiest way to generate tab
content is to act as though we are an analytic. And we are, in a sense -
this analytic just happens to generate a table of data-pump job statuses,
rather than analytics about student performance. This also conveniently
re-uses all the mechanics for authorization, dispatch, page-painting, etc.
"""
@staticmethod
def required_generators():
return []
@staticmethod
def fill_values(app_context, template_values):
template_values['xsrf_token'] = (
crypto.XsrfTokenManager.create_xsrf_token(XSRF_ACTION_NAME))
template_values['exit_url'] = urllib.urlencode({
'exit_url': 'dashboard?%s' % urllib.urlencode({
'action': 'analytics',
'tab': 'data_pump'})})
source_classes = [
ds for ds in data_sources.Registry.get_rest_data_source_classes()
if ds.exportable()]
source_classes.sort(key=lambda c: c.get_title())
# pylint: disable=protected-access
template_values['pumps'] = []
for source_class in source_classes:
job = DataPumpJob(app_context, source_class.__name__)
template_values['pumps'].append(job.get_display_dict(app_context))
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
template_values['need_settings'] = (
PROJECT_ID not in pump_settings or
JSON_KEY not in pump_settings)
# If table_lifetime setting is missing OR is set to the empty string,
# prefer the default value.
template_values['default_lifetime'] = (
pump_settings.get(TABLE_LIFETIME) or PII_SECRET_DEFAULT_LIFETIME)
template_values[DATASET_NAME] = (
pump_settings.get(DATASET_NAME) or
re.sub('[^0-9a-z_:-]', '', app_context.get_slug().lower()) or
'course')
custom_module = None
class DashboardExtension(object):
"""Respond to UI run/cancel commands for individual data pump jobs."""
@classmethod
def register(cls):
# Register a new permission for pushing student data to an external location.
dashboard.DashboardHandler.add_external_permission(
ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION)
# Register a new Analytics sub-tab for showing data pump status and
# start/stop buttons.
data_pump_visualization = analytics.Visualization(
'data_pumps', 'Data Pumps', 'data_pump.html',
data_source_classes=[DataPumpJobsDataSource])
tabs.Registry.register('analytics', 'data_pump', 'Data Pump',
[data_pump_visualization])
def post_action(handler):
cls(handler).post_data_pump()
dashboard.DashboardHandler.post_actions.append(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION,
post_action)
dashboard.DashboardHandler.map_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
@classmethod
def unregister(cls):
dashboard.DashboardHandler.post_actions.remove(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, None)
dashboard.DashboardHandler.unmap_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
dashboard.DashboardHandler.remove_external_permission(ACCESS_PERMISSION)
roles.Roles.unregister_permissions(custom_module)
def post_data_pump(self):
source_name = self.handler.request.get('data_source')
data_source_class = _get_data_source_class_by_name(source_name)
if data_source_class:
action = self.handler.request.get('pump_action')
data_pump_job = DataPumpJob(
self.handler.app_context, source_name,
self.handler.request.get('no_expiration_date') == 'True',
self.handler.request.get('send_uncensored_pii_data') == 'True')
if action == 'start_pump':
data_pump_job.submit()
elif action == 'cancel_pump':
data_pump_job.cancel()
elif action == 'run_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).submit()
elif action == 'cancel_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).cancel()
self.handler.redirect(self.handler.get_action_url(
'analytics', extra_args={'tab': 'data_pump'}, fragment=source_name))
def __init__(self, handler):
self.handler = handler
def register_module():
"""Adds this module to the registry. Called once at startup."""
def validate_project_id(value, errors):
if not value:
return
if not re.match('^[a-z][-a-z0-9]{4,61}[a-z0-9]$', value):
errors.append(
'Project IDs must contain 6-63 lowercase letters, digits, '
'or dashes. IDs must start with a letter and may not end '
'with a dash.')
project_id = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PROJECT_ID,
'Project ID', 'string', validator=validate_project_id,
description='The ID (not the name!) of the Project to which to '
'send data. See the list of projects and their IDs at '
'https://console.developers.google.com/project',
i18n=False)
dataset_name = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + DATASET_NAME,
'Dataset Name', 'string',
description='Name of the BigQuery dataset to which to pump tables. '
'If not set, this will default to the name of the course.',
optional=True, i18n=False)
def validate_json_key(json_key, errors):
if not json_key:
return
try:
json_key = transforms.loads(json_key or '')
if 'private_key' not in json_key or 'client_email' not in json_key:
errors.append(
'The JSON client key for allowing access to push data '
'to BigQuery is missing either the "private_key" or '
'"client_email" field (or both). Please check that you '
'have copied the entire contents of the JSON key file '
'you downloaded using the Credentials screen in the '
'Google Developers Console.')
except ValueError as ex:
errors.append(
'The JSON key field doesn\'t seem to contain valid JSON. '
'Please check that you have copied all of the content of the '
'JSON file you downloaded using the Credentials screen in the '
'Google Developers Console. Also, be sure that you are '
'pasting in the JSON version, not the .p12 (PKCS12) file. ' +
str(ex))
json_key = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + JSON_KEY,
'JSON Key', 'text',
i18n=False, validator=validate_json_key,
description='Contents of a JSON key created in the Developers Console '
'for the instance where BigQuery is to be run. See '
# TODO(mgainer): Get CB location of instructions to get client key
# for destination application.
'the instructions at ')
def validate_table_lifetime(value, errors):
if not value:
return
seconds = common_utils.parse_timedelta_string(value).total_seconds()
if not seconds:
errors.append(
'The string "%s" ' % value +
'could not be parsed; please check the instructions below '
'the field for accepted formats.')
table_lifetime = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + TABLE_LIFETIME,
'Table Lifetime', 'string',
optional=True, i18n=False,
validator=validate_table_lifetime,
description='Amount of time a table pushed to BigQuery will last. '
'After this amount of time, the table will be automatically deleted. '
'(This is useful if your data retention or privacy policy mandates '
'a limited time for analysis after which personal data must be '
'removed.) Leaving this field blank will use the default value '
'of "' + PII_SECRET_DEFAULT_LIFETIME + '". Supported units are: '
'"weeks", "days", "hours", "minutes", "seconds". Units may be '
'specified as their first letter, singular, or plural. Spaces '
'and commas may be used or omitted. E.g., both of the following '
'are equivalent: "3w1d7h", "3 weeks, 1 day, 7 hours"')
pii_encryption_token = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PII_ENCRYPTION_TOKEN,
'PII Encryption Token', 'string',
optional=True, i18n=False, editable=False,
description='Automatically generated encryption secret used to '
'obscure PII fields when these are pushed to BigQuery. This '
'key lasts only as long as the Table Lifetime setting above, or '
'30 days if the limit is not set. After this secret has expired, '
'a new secret will be generated. PII items with the same un-obscured '
'value will be obscured to different values under different secrets. '
'Most importantly, this means that joins on fields that should match '
'(e.g., user ID) will not work across tables pumped under different '
'secrets.')
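# Illustrative sketch (editor's addition): the actual PII obscuring routine
# is not shown in this excerpt; keyed hashing (HMAC) is an assumption used
# here only to show why rotating the secret breaks joins.
def _obscure_pii_sketch(value, secret):
    import hashlib
    import hmac
    return hmac.new(secret, value, hashlib.sha256).hexdigest()
# The same user ID obscured under two different secrets yields unrelated
# values, so tables pumped under different secrets cannot be joined on
# that field:
#   _obscure_pii_sketch('user@example.com', 'secret-1') !=
#   _obscure_pii_sketch('user@example.com', 'secret-2')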
course_settings_fields = (
lambda c: project_id,
lambda c: json_key,
lambda c: dataset_name,
lambda c: table_lifetime,
lambda c: pii_encryption_token,
)
def on_module_enabled():
data_sources.Registry.register(DataPumpJobsDataSource)
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION] += course_settings_fields
tabs.Registry.register('settings', 'data_pump', 'Data Pump',
DATA_PUMP_SETTINGS_SCHEMA_SECTION)
DashboardExtension.register()
def on_module_disabled():
for field in course_settings_fields:
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION].remove(field)
DashboardExtension.unregister()
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Data Pump', 'Pushes DB and generated content to a BigQuery project',
[], [],
notify_module_enabled=on_module_enabled,
notify_module_disabled=on_module_disabled)
return custom_module
# Since this module contains a registry which may be populated from other
# modules, we import 'main' here to ensure that by the time this module is
# loaded, the global code in 'main' has been run (either by this import, or
# earlier). Note that we must do this import strictly after we declare
# register_module(): if this import actually runs the code in main, this
# module must already have declared its own register_module() function so
# that the registration code can see it.
# pylint: disable=unused-import
import main
| apache-2.0 |
PatrickOReilly/scikit-learn | examples/cluster/plot_digits_linkage.py | 366 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
eranchetz/nupic | tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py | 32 | 13701 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'consumption', 'sum'),
],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# 'encoders': {'field1': {'fieldname': 'field1', 'n':100,
# 'name': 'field1', 'type': 'AdaptiveScalarEncoder',
# 'w': 21}}
#
'encoders': {
'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'type': 'DateEncoder',
'timeOfDay': (21, 1)},
'timestamp_dayOfWeek': {
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder',
'dayOfWeek': (21, 1)},
'_classifierInput': {
'name': u'_classifierInput',
'fieldname': u'consumption',
'classifierOnly': True,
'type': 'AdaptiveScalarEncoder',
'clipInput': True,
'n': 100,
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : { u'days': 0, u'hours': 0},
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
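# Illustrative sketch (editor's addition): updateConfigFromSubConfig() is
# imported from expdescriptionhelpers and its implementation is not shown
# here. Conceptually it folds sub-experiment overrides into this base
# config; a minimal recursive merge of that shape (an assumption, not the
# real helper) would be:
def _merge_config_sketch(base, overrides):
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            _merge_config_sketch(base[key], value)
        else:
            base[key] = value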
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : {
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either the number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
theoryno3/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 317 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
Vvkmnn/books | TensorFlowForMachineIntelligence/chapters/04_machine_learning_basics/softmax.py | 1 | 3143 | # Softmax example in TF using the classical Iris dataset
# Download iris.data from https://archive.ics.uci.edu/ml/datasets/Iris
import tensorflow as tf
import os
# This time the weights form a matrix, not a column vector: one "weight vector" per class.
W = tf.Variable(tf.zeros([4, 3]), name="weights")
# so do the biases, one per class.
b = tf.Variable(tf.zeros([3], name="bias"))
def combine_inputs(X):
return tf.matmul(X, W) + b
def inference(X):
return tf.nn.softmax(combine_inputs(X))
def loss(X, Y):
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(combine_inputs(X), Y))
def read_csv(batch_size, file_name, record_defaults):
filename_queue = tf.train.string_input_producer([os.path.dirname(__file__) + "/" + file_name])
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)
# decode_csv will convert a Tensor of type string (the text line) into
# a tuple of tensor columns with the specified defaults, which also
# set the data type for each column
decoded = tf.decode_csv(value, record_defaults=record_defaults)
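# For the iris call below, record_defaults=[[0.0], [0.0], [0.0], [0.0], [""]]
# makes decode_csv emit four float32 scalar tensors and one string scalar
# tensor per line, in column order.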
# batch actually reads the file and loads "batch_size" rows in a single tensor
return tf.train.shuffle_batch(decoded,
batch_size=batch_size,
capacity=batch_size * 50,
min_after_dequeue=batch_size)
def inputs():
sepal_length, sepal_width, petal_length, petal_width, label =\
read_csv(100, "iris.data", [[0.0], [0.0], [0.0], [0.0], [""]])
# convert class names to a 0 based class index.
label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.pack([
tf.equal(label, ["Iris-setosa"]),
tf.equal(label, ["Iris-versicolor"]),
tf.equal(label, ["Iris-virginica"])
])), 0))
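# Worked example (editor's note): for a row labeled "Iris-versicolor" the
# three tf.equal() comparisons stack to [0, 1, 0], and tf.argmax over
# dimension 0 returns 1, i.e. the 0-based class index in the order
# setosa, versicolor, virginica.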
# Pack all the features that we care about into a single matrix;
# we then transpose to have a matrix with one example per row and one feature per column.
features = tf.transpose(tf.pack([sepal_length, sepal_width, petal_length, petal_width]))
return features, label_number
def train(total_loss):
learning_rate = 0.01
return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
def evaluate(sess, X, Y):
predicted = tf.cast(tf.arg_max(inference(X), 1), tf.int32)
print sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32)))
# Launch the graph in a session, setup boilerplate
with tf.Session() as sess:
tf.initialize_all_variables().run()
X, Y = inputs()
total_loss = loss(X, Y)
train_op = train(total_loss)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# actual training loop
training_steps = 1000
for step in range(training_steps):
sess.run([train_op])
# for debugging and learning purposes, see how the loss gets decremented thru training steps
if step % 10 == 0:
print "loss: ", sess.run([total_loss])
evaluate(sess, X, Y)
coord.request_stop()
coord.join(threads)
sess.close()
| gpl-3.0 |
previtus/MGR-Project-Code | Settings/set1-test_of_models_against_datasets/models_30m_640px.py | 1 | 2240 | def Setup(Settings,DefaultModel):
# set1-test_of_models_against_datasets/models_30m_640px.py
Settings["experiment_name"] = "set1c_Models_Test_30m_640px"
Settings["graph_histories"] = ['together', [0,1], [1,2], [0,2]]
n=0
# 5556x_minlen30_640px 5556x_minlen20_640px 5556x_reslen20_299px 5556x_reslen30_299px
Settings["models"][n]["dataset_name"] = "5556x_minlen30_640px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'mix'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
# c
Settings["models"][n]["loss_func"] = 'mean_absolute_error'
Settings["models"][n]["metrics"] = ['mean_squared_error']
Settings["models"].append(DefaultModel.copy())
n=1
Settings["models"][n]["dataset_pointer"] = -1 # 0 - reuse the first dataset
Settings["models"][n]["dataset_name"] = "5556x_minlen30_640px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["model_type"] = 'osm_only'
Settings["models"][n]["unique_id"] = 'osm_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
# c
Settings["models"][n]["loss_func"] = 'mean_absolute_error'
Settings["models"][n]["metrics"] = ['mean_squared_error']
Settings["models"].append(DefaultModel.copy())
n=2
Settings["models"][n]["dataset_pointer"] = -1 # 0 - reuse the first dataset
Settings["models"][n]["dataset_name"] = "5556x_minlen30_640px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'img_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
# c
Settings["models"][n]["loss_func"] = 'mean_absolute_error'
Settings["models"][n]["metrics"] = ['mean_squared_error']
return Settings
| mit |
yyjiang/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 226 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of the nearest neighbor queries
of Locality Sensitive Hashing Forest behaves as the number of candidates
and the number of estimators (trees) vary.
In the first plot, accuracy is measured as the number of candidates varies.
Here, the term "number of candidates" refers to the maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required; therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidates` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/tests/test_grid_search.py | 67 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors when X and y have
# different lengths
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# the first one contains data from all 4 blobs, the second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
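# numerically: 1 * 0.25 + (1. / 3.) * 0.75 = 0.25 + 0.25 = 0.5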
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
ray-project/ray | python/ray/util/collective/tests/util.py | 1 | 12263 | import cupy as cp
import logging
import ray
import ray.util.collective as col
from ray.util.collective.types import Backend, ReduceOp
from ray.util.collective.collective_group.nccl_util import get_num_gpus
import torch
logger = logging.getLogger(__name__)
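# Single-GPU test actor: owns a cupy buffer and a small list buffer, and
# exposes one thin remote wrapper per collective primitive (allreduce,
# reduce, broadcast, allgather, reducescatter, send/recv).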
@ray.remote(num_gpus=1)
class Worker:
def __init__(self):
self.buffer = None
self.list_buffer = None
def init_tensors(self):
self.buffer = cp.ones((10,), dtype=cp.float32)
self.list_buffer = [cp.ones((10,), dtype=cp.float32) for _ in range(2)]
cp.cuda.Stream.null.synchronize()
return True
def init_group(self, world_size, rank, backend=Backend.NCCL, group_name="default"):
col.init_collective_group(world_size, rank, backend, group_name)
return True
def set_buffer(self, data):
self.buffer = data
return self.buffer
def get_buffer(self):
return self.buffer
def set_list_buffer(self, list_of_arrays):
self.list_buffer = list_of_arrays
return self.list_buffer
def do_allreduce(self, group_name="default", op=ReduceOp.SUM):
col.allreduce(self.buffer, group_name, op)
return self.buffer
def do_reduce(self, group_name="default", dst_rank=0, op=ReduceOp.SUM):
col.reduce(self.buffer, dst_rank, group_name, op)
return self.buffer
def do_broadcast(self, group_name="default", src_rank=0):
col.broadcast(self.buffer, src_rank, group_name)
return self.buffer
def do_allgather(self, group_name="default"):
col.allgather(self.list_buffer, self.buffer, group_name)
return self.list_buffer
def do_reducescatter(self, group_name="default", op=ReduceOp.SUM):
col.reducescatter(self.buffer, self.list_buffer, group_name, op)
return self.buffer
def do_send(self, group_name="default", dst_rank=0):
col.send(self.buffer, dst_rank, group_name)
return self.buffer
def do_recv(self, group_name="default", src_rank=0):
col.recv(self.buffer, src_rank, group_name)
return self.buffer
def destroy_group(self, group_name="default"):
col.destroy_collective_group(group_name)
return True
def report_rank(self, group_name="default"):
rank = col.get_rank(group_name)
return rank
def report_world_size(self, group_name="default"):
ws = col.get_collective_group_size(group_name)
return ws
def report_nccl_availability(self):
avail = col.nccl_available()
return avail
def report_gloo_availability(self):
avail = col.gloo_available()
return avail
def report_is_group_initialized(self, group_name="default"):
is_init = col.is_group_initialized(group_name)
return is_init
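# Spawn `num_workers` single-GPU Worker actors, allocate their tensors, and
# rendezvous them into one collective group. A minimal usage sketch, assuming
# a Ray cluster with at least `num_workers` GPUs is available:
#   actors, _ = create_collective_workers(num_workers=2)
#   results = ray.get([a.do_allreduce.remote() for a in actors])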
def create_collective_workers(num_workers=2, group_name="default", backend="nccl"):
actors = [None] * num_workers
for i in range(num_workers):
actor = Worker.remote()
ray.get([actor.init_tensors.remote()])
actors[i] = actor
world_size = num_workers
init_results = ray.get(
[
actor.init_group.remote(world_size, i, backend, group_name)
for i, actor in enumerate(actors)
]
)
return actors, init_results
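# Seed each actor's send buffer with a rank-dependent value (actor i holds
# ones * (i + 1)) and reset its gather list to ones, using either the cupy
# or the torch backend.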
def init_tensors_for_gather_scatter(
actors, array_size=10, dtype=cp.float32, tensor_backend="cupy"
):
world_size = len(actors)
for i, a in enumerate(actors):
if tensor_backend == "cupy":
t = cp.ones(array_size, dtype=dtype) * (i + 1)
elif tensor_backend == "torch":
t = torch.ones(array_size, dtype=torch.float32).cuda() * (i + 1)
else:
raise RuntimeError("Unsupported tensor backend.")
ray.get([a.set_buffer.remote(t)])
if tensor_backend == "cupy":
list_buffer = [cp.ones(array_size, dtype=dtype) for _ in range(world_size)]
elif tensor_backend == "torch":
list_buffer = [
torch.ones(array_size, dtype=torch.float32).cuda()
for _ in range(world_size)
]
else:
raise RuntimeError("Unsupported tensor backend.")
ray.get([a.set_list_buffer.remote(list_buffer) for a in actors])
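# Two-GPU variant of Worker: keeps one buffer and one list buffer per local
# device (GPU 0 and GPU 1) so the *_multigpu collective calls can be driven
# from a single actor.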
@ray.remote(num_gpus=2)
class MultiGPUWorker:
def __init__(self):
self.buffer0 = None
self.buffer1 = None
self.list_buffer0 = None
self.list_buffer1 = None
def __del__(self):
self.buffer0 = None
self.buffer1 = None
self.list_buffer0 = None
self.list_buffer1 = None
def init_tensors(self):
with cp.cuda.Device(0):
self.buffer0 = cp.ones((10,), dtype=cp.float32)
self.list_buffer0 = [cp.ones((10,), dtype=cp.float32) for _ in range(4)]
with cp.cuda.Device(1):
self.buffer1 = cp.ones((10,), dtype=cp.float32)
self.list_buffer1 = [cp.ones((10,), dtype=cp.float32) for _ in range(4)]
cp.cuda.Stream.null.synchronize()
return True
def init_group(self, world_size, rank, backend=Backend.NCCL, group_name="default"):
col.init_collective_group(world_size, rank, backend, group_name)
return True
def set_buffer(
self,
size,
value0=1.0,
value1=1.0,
dtype=cp.float32,
tensor_type0="cupy",
tensor_type1="cupy",
):
if tensor_type0 == "cupy":
with cp.cuda.Device(0):
self.buffer0 = cp.ones(size, dtype=dtype) * value0
elif tensor_type0 == "torch":
self.buffer0 = torch.ones(size, dtype=torch.float32).cuda(0) * value0
else:
            raise RuntimeError(f"Unsupported tensor_type0: {tensor_type0}")
if tensor_type1 == "cupy":
with cp.cuda.Device(1):
self.buffer1 = cp.ones(size, dtype=dtype) * value1
elif tensor_type1 == "torch":
self.buffer1 = torch.ones(size, dtype=torch.float32).cuda(1) * value1
else:
            raise RuntimeError(f"Unsupported tensor_type1: {tensor_type1}")
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
# cp.cuda.Stream.null.synchronize()
return True
def set_list_buffer(
self,
size,
value0=1.0,
value1=1.0,
dtype=cp.float32,
tensor_type0="cupy",
tensor_type1="cupy",
):
if tensor_type0 == "cupy":
with cp.cuda.Device(0):
self.list_buffer0 = [
cp.ones(size, dtype=dtype) * value0 for _ in range(4)
]
elif tensor_type0 == "torch":
self.list_buffer0 = [
torch.ones(size, dtype=torch.float32).cuda(0) * value0 for _ in range(4)
]
else:
            raise RuntimeError(f"Unsupported tensor_type0: {tensor_type0}")
if tensor_type1 == "cupy":
with cp.cuda.Device(1):
self.list_buffer1 = [
cp.ones(size, dtype=dtype) * value1 for _ in range(4)
]
elif tensor_type1 == "torch":
self.list_buffer1 = [
torch.ones(size, dtype=torch.float32).cuda(1) * value1 for _ in range(4)
]
else:
            raise RuntimeError(f"Unsupported tensor_type1: {tensor_type1}")
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
return True
@ray.method(num_returns=2)
def get_buffer(self):
return self.buffer0, self.buffer1
def do_allreduce_multigpu(self, group_name="default", op=ReduceOp.SUM):
col.allreduce_multigpu([self.buffer0, self.buffer1], group_name, op)
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
return self.buffer0
def do_reduce_multigpu(
self, group_name="default", dst_rank=0, dst_gpu_index=0, op=ReduceOp.SUM
):
col.reduce_multigpu(
[self.buffer0, self.buffer1], dst_rank, dst_gpu_index, group_name, op
)
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
return self.buffer0, self.buffer1
def do_broadcast_multigpu(self, group_name="default", src_rank=0, src_gpu_index=0):
col.broadcast_multigpu(
[self.buffer0, self.buffer1], src_rank, src_gpu_index, group_name
)
return self.buffer0, self.buffer1
def do_allgather_multigpu(self, group_name="default"):
col.allgather_multigpu(
[self.list_buffer0, self.list_buffer1],
[self.buffer0, self.buffer1],
group_name,
)
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
return self.list_buffer0, self.list_buffer1
def do_reducescatter_multigpu(self, group_name="default", op=ReduceOp.SUM):
col.reducescatter_multigpu(
[self.buffer0, self.buffer1],
[self.list_buffer0, self.list_buffer1],
group_name,
op,
)
cp.cuda.Device(0).synchronize()
cp.cuda.Device(1).synchronize()
return self.buffer0, self.buffer1
def do_send_multigpu(
self, group_name="default", dst_rank=0, dst_gpu_index=0, src_gpu_index=0
):
if src_gpu_index == 0:
col.send_multigpu(self.buffer0, dst_rank, dst_gpu_index, group_name)
cp.cuda.Device(0).synchronize()
return self.buffer0
elif src_gpu_index == 1:
col.send_multigpu(self.buffer1, dst_rank, dst_gpu_index, group_name)
cp.cuda.Device(1).synchronize()
return self.buffer1
else:
            raise RuntimeError(f"Invalid src_gpu_index: {src_gpu_index}")
def do_recv_multigpu(
self, group_name="default", src_rank=0, src_gpu_index=0, dst_gpu_index=0
):
if dst_gpu_index == 0:
col.recv_multigpu(self.buffer0, src_rank, src_gpu_index, group_name)
cp.cuda.Device(0).synchronize()
return self.buffer0
elif dst_gpu_index == 1:
col.recv_multigpu(self.buffer1, src_rank, src_gpu_index, group_name)
cp.cuda.Device(1).synchronize()
return self.buffer1
else:
            raise RuntimeError(f"Invalid dst_gpu_index: {dst_gpu_index}")
def destroy_group(self, group_name="default"):
col.destroy_collective_group(group_name)
return True
def report_rank(self, group_name="default"):
rank = col.get_rank(group_name)
return rank
def report_world_size(self, group_name="default"):
ws = col.get_collective_group_size(group_name)
return ws
def report_nccl_availability(self):
avail = col.nccl_available()
return avail
def report_gloo_availability(self):
avail = col.gloo_available()
return avail
def report_is_group_initialized(self, group_name="default"):
is_init = col.is_group_initialized(group_name)
return is_init
def report_num_gpus(self):
n_gpus = get_num_gpus()
return n_gpus
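# Spawn `num_workers` two-GPU actors, seed their per-device buffers, and
# rendezvous them into one collective group. A usage sketch, assuming two
# GPUs per actor are available:
#   actors, _ = create_collective_multigpu_workers(num_workers=2)
#   out = ray.get([a.do_allreduce_multigpu.remote() for a in actors])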
def create_collective_multigpu_workers(
num_workers=2, group_name="default", backend="nccl"
):
actors = [None] * num_workers
for i in range(num_workers):
actor = MultiGPUWorker.remote()
ray.get([actor.set_buffer.remote([10])], timeout=10)
ray.get([actor.set_list_buffer.remote([10])], timeout=10)
actors[i] = actor
world_size = num_workers
init_results = ray.get(
[
actor.init_group.remote(world_size, i, backend, group_name)
for i, actor in enumerate(actors)
]
)
return actors, init_results
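# Reset every actor's per-GPU buffers and gather lists to ones in the
# requested tensor backend before a multi-GPU gather/scatter test.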
def init_tensors_for_gather_scatter_multigpu(
actors, array_size=10, tensor_backend="cupy"
):
    for a in actors:
if tensor_backend == "cupy":
ray.get([a.set_buffer.remote(array_size)])
ray.get([a.set_list_buffer.remote(array_size)])
elif tensor_backend == "torch":
ray.get(
[
a.set_buffer.remote(
array_size, tensor_type0="torch", tensor_type1="torch"
)
]
)
ray.get(
[
a.set_list_buffer.remote(
array_size, tensor_type0="torch", tensor_type1="torch"
)
]
)
else:
raise RuntimeError("Unsupported tensor backend.")
| apache-2.0 |
ray-project/ray | rllib/evaluation/tests/test_env_runner_v2.py | 1 | 12202 | import unittest
import ray
from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.rllib.algorithms.ppo import PPO, PPOConfig
from ray.rllib.connectors.connector import ActionConnector, ConnectorContext
from ray.rllib.evaluation.metrics import RolloutMetrics
from ray.rllib.examples.env.debug_counter_env import DebugCounterEnv
from ray.rllib.examples.env.multi_agent import BasicMultiAgent
from ray.rllib.examples.policy.random_policy import RandomPolicy
from ray.rllib.policy.policy import PolicySpec
from ray.tune import register_env
from ray.rllib.policy.sample_batch import convert_ma_batch_to_sample_batch
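# Reusable 2-agent environment shared by every test in this module.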
register_env("basic_multiagent", lambda _: BasicMultiAgent(2))
class TestEnvRunnerV2(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
# When dealing with two policies in these tests, simply alternate between the 2
# policies to make sure we have data for inference for both policies for each
# step.
class AlternatePolicyMapper:
def __init__(self):
self.policies = ["one", "two"]
self.next = 0
def map(self):
p = self.policies[self.next]
self.next = 1 - self.next
return p
cls.mapper = AlternatePolicyMapper()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_sample_batch_rollout_single_agent_env(self):
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
)
algo = PPO(config, env=DebugCounterEnv)
rollout_worker = algo.workers.local_worker()
sample_batch = rollout_worker.sample()
sample_batch = convert_ma_batch_to_sample_batch(sample_batch)
self.assertEqual(sample_batch["t"][0], 0)
self.assertEqual(sample_batch.env_steps(), 200)
self.assertEqual(sample_batch.agent_steps(), 200)
def test_sample_batch_rollout_multi_agent_env(self):
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
)
algo = PPO(config, env="basic_multiagent")
rollout_worker = algo.workers.local_worker()
sample_batch = rollout_worker.sample()
# 2 agents. So the multi-agent SampleBatch should have
# 200 env steps, and 400 agent steps.
self.assertEqual(sample_batch.env_steps(), 200)
self.assertEqual(sample_batch.agent_steps(), 400)
def test_inference_batches_are_grouped_by_policy(self):
# Create 2 policies that have different inference batch shapes.
class RandomPolicyOne(RandomPolicy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.view_requirements["rewards"].used_for_compute_actions = True
self.view_requirements["dones"].used_for_compute_actions = True
# Create 2 policies that have different inference batch shapes.
class RandomPolicyTwo(RandomPolicy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.view_requirements["rewards"].used_for_compute_actions = False
self.view_requirements["dones"].used_for_compute_actions = False
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
.multi_agent(
policies={
"one": PolicySpec(
policy_class=RandomPolicyOne,
),
"two": PolicySpec(
policy_class=RandomPolicyTwo,
),
},
policy_mapping_fn=lambda *args, **kwargs: self.mapper.map(),
policies_to_train=["one"],
count_steps_by="agent_steps",
)
)
algo = PPO(config, env="basic_multiagent")
local_worker = algo.workers.local_worker()
env = local_worker.env
obs, rewards, dones, infos = local_worker.env.step(
{0: env.action_space.sample(), 1: env.action_space.sample()}
)
env_id = 0
env_runner = local_worker.sampler._env_runner_obj
env_runner.create_episode(env_id)
_, to_eval, _ = env_runner._process_observations(
{0: obs}, {0: rewards}, {0: dones}, {0: infos}
)
# We should have 2 separate batches for both policies.
        # Each batch has 1 sample.
self.assertTrue("one" in to_eval)
self.assertEqual(len(to_eval["one"]), 1)
self.assertTrue("two" in to_eval)
self.assertEqual(len(to_eval["two"]), 1)
def test_action_connector_gets_raw_input_dict(self):
class CheckInputDictActionConnector(ActionConnector):
def __call__(self, ac_data):
assert ac_data.input_dict, "raw input dict should be available"
return ac_data
class AddActionConnectorCallbacks(DefaultCallbacks):
def on_create_policy(self, *, policy_id, policy) -> None:
policy.action_connectors.append(
CheckInputDictActionConnector(ConnectorContext.from_policy(policy))
)
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.callbacks(
callbacks_class=AddActionConnectorCallbacks,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
)
algo = PPO(config, env="basic_multiagent")
rollout_worker = algo.workers.local_worker()
# As long as we can successfully sample(), things should be good.
_ = rollout_worker.sample()
def test_start_episode(self):
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
.multi_agent(
policies={
"one": PolicySpec(
policy_class=RandomPolicy,
),
"two": PolicySpec(
policy_class=RandomPolicy,
),
},
policy_mapping_fn=lambda *args, **kwargs: self.mapper.map(),
policies_to_train=["one"],
count_steps_by="agent_steps",
)
)
algo = PPO(config, env="basic_multiagent")
local_worker = algo.workers.local_worker()
env_runner = local_worker.sampler._env_runner_obj
# No episodes present
self.assertEqual(env_runner._active_episodes.get(0), None)
env_runner.step()
# Only initial observation collected, add_init_obs called on episode
self.assertEqual(env_runner._active_episodes[0].total_env_steps, 0)
self.assertEqual(env_runner._active_episodes[0].total_agent_steps, 0)
env_runner.step()
# First recorded step, add_action_reward_done_next_obs called
self.assertEqual(env_runner._active_episodes[0].total_env_steps, 1)
self.assertEqual(env_runner._active_episodes[0].total_agent_steps, 2)
def test_env_runner_output(self):
# Test if we can produce RolloutMetrics just by stepping
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
.multi_agent(
policies={
"one": PolicySpec(
policy_class=RandomPolicy,
),
"two": PolicySpec(
policy_class=RandomPolicy,
),
},
policy_mapping_fn=lambda *args, **kwargs: self.mapper.map(),
policies_to_train=["one"],
count_steps_by="agent_steps",
)
)
algo = PPO(config, env="basic_multiagent")
local_worker = algo.workers.local_worker()
env_runner = local_worker.sampler._env_runner_obj
outputs = []
while not outputs:
outputs = env_runner.step()
self.assertEqual(len(outputs), 1)
        self.assertEqual(len(outputs[0].agent_rewards), 2)
def test_env_error(self):
class CheckErrorCallbacks(DefaultCallbacks):
def on_episode_end(
self, *, worker, base_env, policies, episode, env_index=None, **kwargs
) -> None:
# We should see an error episode.
assert isinstance(episode, Exception)
# Test if we can produce RolloutMetrics just by stepping
config = (
PPOConfig()
.framework("torch")
.training(
# Specifically ask for a batch of 200 samples.
train_batch_size=200,
)
.rollouts(
num_envs_per_worker=1,
num_rollout_workers=0,
# Enable EnvRunnerV2.
enable_connectors=True,
)
.multi_agent(
policies={
"one": PolicySpec(
policy_class=RandomPolicy,
),
"two": PolicySpec(
policy_class=RandomPolicy,
),
},
policy_mapping_fn=lambda *args, **kwargs: self.mapper.map(),
policies_to_train=["one"],
count_steps_by="agent_steps",
)
.callbacks(
callbacks_class=CheckErrorCallbacks,
)
)
algo = PPO(config, env="basic_multiagent")
local_worker = algo.workers.local_worker()
env_runner = local_worker.sampler._env_runner_obj
# Run a couple of steps.
env_runner.step()
env_runner.step()
active_envs, to_eval, outputs = env_runner._process_observations(
unfiltered_obs={0: AttributeError("mock error")},
rewards={0: {}},
dones={0: {"__all__": True}},
infos={0: {}},
)
self.assertEqual(active_envs, {0})
        self.assertTrue(to_eval)  # to_eval contains data for the newly reset episode.
self.assertEqual(len(outputs), 1)
self.assertTrue(isinstance(outputs[0], RolloutMetrics))
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 |