repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
seaotterman/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# You can define your configuration by providing a RunConfig object to the
# estimator to control session configuration, e.g. num_cores
# and gpu_memory_fraction.
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
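# Note (added): the imports above target older scikit-learn / TensorFlow 1.x
# releases. A roughly equivalent, hedged sketch against the later APIs would be:
#
#   from sklearn.model_selection import train_test_split
#   run_config = tf.estimator.RunConfig(
#       session_config=tf.ConfigProto(
#           gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.6)))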
| apache-2.0 |
sinhrks/expandas | pandas_ml/skaccessors/test/test_svm.py | 2 | 2995 | #!/usr/bin/env python
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.svm as svm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestSVM(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.svm.SVC, svm.SVC)
self.assertIs(df.svm.LinearSVC, svm.LinearSVC)
self.assertIs(df.svm.NuSVC, svm.NuSVC)
self.assertIs(df.svm.SVR, svm.SVR)
self.assertIs(df.svm.NuSVR, svm.NuSVR)
self.assertIs(df.svm.OneClassSVM, svm.OneClassSVM)
def test_l1_min_c(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.svm.l1_min_c()
expected = svm.l1_min_c(iris.data, iris.target)
self.assertAlmostEqual(result, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_curve(self, algo):
# http://scikit-learn.org/stable/auto_examples/plot_kernel_ridge_regression.html
X = 5 * np.random.rand(1000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(X.shape[0] // 5))
df = pdml.ModelFrame(data=X, target=y)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(X, y)
result = df.predict(mod1)
expected = mod2.predict(X)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_iris(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['LinearSVC', 'NuSVC'])
def test_Classifications(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)(random_state=self.random_state)
mod2 = getattr(svm, algo)(random_state=self.random_state)
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
| bsd-3-clause |
arahuja/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
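# A vectorized alternative to the point-by-point loop above (a sketch assuming
# the same fitted `clf`; not part of the original example): stack the grid into
# an (n_points, 2) array and call decision_function once.
#
#   grid = np.c_[X1.ravel(), X2.ravel()]
#   Z = clf.decision_function(grid).reshape(X1.shape)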
| bsd-3-clause |
vybstat/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that changing n_components via set_params raises an error on partial_fit.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of input features raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
scipy/scipy | scipy/signal/bsplines.py | 12 | 19509 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
Parameters
----------
Iin : array_like
input data set
lmbda : float, optional
spline smoothing fall-off value, default is `5.0`.
Returns
-------
res : ndarray
filtered input data
Examples
--------
We can filter a multi-dimensional signal (e.g. a 2D image) using a cubic
B-spline filter:
>>> from scipy.signal import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, lmbda=0.1)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = float_factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values
See Also
--------
cubic : A cubic B-spline.
quadratic : A quadratic B-spline.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
r"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values approximated by a zero-mean Gaussian
function.
Notes
-----
The B-spline basis function can be approximated well by a zero-mean
Gaussian function with variance :math:`\sigma^2 = (n+1)/12`
for large `n`:
.. math:: \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{x^2}{2\sigma^2}\right)
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
.. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
Examples
--------
We can calculate B-Spline basis functions approximated by a gaussian
distribution:
>>> from scipy.signal import gauss_spline, bspline
>>> knots = np.array([-1.0, 0.0, -1.0])
>>> gauss_spline(knots, 3)
array([0.15418033, 0.6909883, 0.15418033]) # may vary
>>> bspline(knots, 3)
array([0.16666667, 0.66666667, 0.16666667]) # may vary
"""
x = asarray(x)
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Cubic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
quadratic : A quadratic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Quadratic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
cubic : A cubic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
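# Orientation for the undocumented helpers below (comment added for
# readability; not part of the original source): _coeff_smooth returns the
# magnitude `rho` and angle `omega` of the complex pole pair of the recursive
# smoothing-spline filter for smoothing parameter `lam`, while _hc and _hs
# evaluate the causal and symmetric parts of its impulse response, used to
# initialise the forward and backward passes in _cubic_smooth_coeff.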
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
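# _cubic_coeff and _quadratic_coeff below compute exact interpolating spline
# coefficients by recursive filtering (comment added for readability; not part
# of the original source): `zi` is the pole of the inverse B-spline filter
# (-2 + sqrt(3) for the cubic, -3 + 2*sqrt(2) for the quadratic), the forward
# and backward loops apply the causal and anti-causal halves of the filter,
# and the final scaling (6.0 or 8.0) is the corresponding filter gain.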
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
See Also
--------
cspline1d_eval : Evaluate a cubic spline at the new set of points.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
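# A small verification sketch of the reconstruction described in the docstring
# above (added for illustration; not part of the original module): away from
# the edges, convolving the coefficients with the length-3 FIR window
# [1., 4., 1.] / 6. recovers the samples, while the edges additionally need
# the mirror-symmetric boundary handling mentioned above.
#
#   import numpy as np
#   from scipy.signal import cspline1d
#   sig = np.sin(np.linspace(0, 2 * np.pi, 50))
#   c = cspline1d(sig)
#   rec = np.convolve(c, np.array([1., 4., 1.]) / 6., mode='same')
#   # np.allclose(rec[2:-2], sig[2:-2]) is expected to hold.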
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
See Also
--------
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
Notes
-----
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a cubic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Parameters
----------
cj : ndarray
cubic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
The evaluated cubic spline points.
See Also
--------
cspline1d : Compute cubic spline coefficients for rank-1 array.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
Parameters
----------
cj : ndarray
Quadratic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
The evaluated quadratic spline points.
See Also
--------
qspline1d : Compute quadratic spline coefficients for rank-1 array.
Notes
-----
`dx` is the old sample-spacing while `x0` was the old origin. In
other words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
| bsd-3-clause |
mmaraya/nd101 | ch02/lesson04/and.perceptron.py | 1 | 1065 | #!/usr/bin/env python
import pandas as pd
# Set weight1, weight2, and bias
weight1 = 0.5
weight2 = 0.5
bias = -1.0
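# Why these particular values implement AND (worked check, added for clarity):
#   (0, 0): 0.5*0 + 0.5*0 - 1.0 = -1.0  -> below zero, output 0
#   (0, 1): 0.5*0 + 0.5*1 - 1.0 = -0.5  -> below zero, output 0
#   (1, 0): 0.5*1 + 0.5*0 - 1.0 = -0.5  -> below zero, output 0
#   (1, 1): 0.5*1 + 0.5*1 - 1.0 =  0.0  -> >= 0, output 1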
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
| mit |
mahak/spark | python/pyspark/pandas/base.py | 3 | 55960 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for pandas-on-Spark objects.
"""
from abc import ABCMeta, abstractmethod
from functools import wraps, partial
from itertools import chain
from typing import Any, Callable, Optional, Sequence, Tuple, Union, cast, TYPE_CHECKING
import numpy as np
import pandas as pd # noqa: F401
from pandas.api.types import is_list_like, CategoricalDtype
from pyspark.sql import functions as F, Column, Window
from pyspark.sql.types import (
DoubleType,
FloatType,
LongType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, IndexOpsLike, Label, SeriesOrIndex
from pyspark.pandas.config import get_option, option_context
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkIndexOpsMethods
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from pyspark.pandas.frame import DataFrame
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.data_type_ops.base import DataTypeOps # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def should_alignment_for_column_op(self: SeriesOrIndex, other: SeriesOrIndex) -> bool:
from pyspark.pandas.series import Series
if isinstance(self, Series) and isinstance(other, Series):
return not same_anchor(self, other)
else:
return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(
func: Callable[..., Column], this_index_ops: SeriesOrIndex, *args: Any
) -> SeriesOrIndex:
"""
Align the `IndexOpsMixin` objects and apply the function.
Parameters
----------
func : The function to apply
this_index_ops : IndexOpsMixin
A base `IndexOpsMixin` object
args : list of other arguments including other `IndexOpsMixin` objects
Returns
-------
`Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`
"""
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series, first_series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
combined = combine_frames(
this_index_ops.to_frame(),
*[cast(Series, col).rename(i) for i, col in enumerate(cols)],
how="full"
)
return column_op(func)(
combined["this"]._psser_for(combined["this"]._internal.column_labels[0]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
# This could cause as many counts, reset_index calls, joins for combining
# as the number of `Index`s in `args`. So far it's fine since we can assume the ops
# only work between at most two `Index`s. We might need to fix it in the future.
self_len = len(this_index_ops)
if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
raise ValueError("operands could not be broadcast together with shapes")
with option_context("compute.default_index_type", "distributed-sequence"):
if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
return Index(
column_op(func)(
this_index_ops.to_series().reset_index(drop=True),
*[
arg.to_series().reset_index(drop=True)
if isinstance(arg, Index)
else arg
for arg in args
]
).sort_index(),
name=this_index_ops.name,
)
elif isinstance(this_index_ops, Series):
this = cast(DataFrame, this_index_ops.reset_index())
that = [
cast(Series, col.to_series() if isinstance(col, Index) else col)
.rename(i)
.reset_index(drop=True)
for i, col in enumerate(cols)
]
combined = combine_frames(this, *that, how="full").sort_index()
combined = combined.set_index(
combined._internal.column_labels[: this_index_ops._internal.index_level]
)
combined.index.names = this_index_ops._internal.index_names
return column_op(func)(
first_series(combined["this"]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)
that_series = next(col for col in cols if isinstance(col, Series))
that_frame = that_series._psdf[
[
cast(Series, col.to_series() if isinstance(col, Index) else col).rename(i)
for i, col in enumerate(cols)
]
]
combined = combine_frames(this, that_frame.reset_index()).sort_index()
self_index = (
combined["this"].set_index(combined["this"]._internal.column_labels).index
)
other = combined["that"].set_index(
combined["that"]._internal.column_labels[: that_series._internal.index_level]
)
other.index.names = that_series._internal.index_names
return column_op(func)(
self_index,
*[
other._psser_for(label)
for label, col in zip(other._internal.column_labels, cols)
]
).rename(that_series.name)
def booleanize_null(scol: Column, f: Callable[..., Column]) -> Column:
"""
Booleanize Null in Spark Column
"""
comp_ops = [
getattr(Column, "__{}__".format(comp_op))
for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"]
]
if f in comp_ops:
# if `f` is "!=", fill null with True otherwise False
filler = f == Column.__ne__
scol = F.when(scol.isNull(), filler).otherwise(scol)
return scol
def column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
"""
A decorator that wraps APIs taking/returning Spark Column so that pandas-on-Spark Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes pandas-on-Spark Series as well and returns
pandas-on-Spark Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: pandas-on-Spark Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.series import Series
# It is possible for the function `f` to take arguments other than Spark Columns.
# To cover this case, explicitly check whether an argument is a pandas-on-Spark
# Series/Index and extract its Spark Column. Other arguments are used as they are.
cols = [arg for arg in args if isinstance(arg, (Series, Index))]
if all(not should_alignment_for_column_op(self, col) for col in cols):
# Same DataFrame anchors
scol = f(
self.spark.column,
*[arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
)
field = InternalField.from_struct_field(
self._internal.spark_frame.select(scol).schema[0],
use_extension_dtypes=any(
isinstance(col.dtype, extension_dtypes) for col in [self] + cols
),
)
if not field.is_extension_dtype:
scol = booleanize_null(scol, f).alias(field.name)
if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
index_ops = self._with_new_scol(scol, field=field)
else:
psser = next(col for col in cols if isinstance(col, Series))
index_ops = psser._with_new_scol(scol, field=field)
elif get_option("compute.ops_on_diff_frames"):
index_ops = align_diff_index_ops(f, self, *args)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
if not all(self.name == col.name for col in cols):
index_ops = index_ops.rename(None)
return index_ops
return wrapper
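# A minimal usage sketch of the decorator above (illustrative only; the wrapped
# lambda is an assumption, not part of the original module):
#
#   startswith_op = column_op(lambda scol, prefix: scol.startswith(prefix))
#   # startswith_op(psser, "a") returns a pandas-on-Spark Series/Index of
#   # booleans, aligning frames when other Series/Index arguments are passed
#   # and "compute.ops_on_diff_frames" permits it.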
def numpy_column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, "s")))
else:
new_args.append(arg)
return column_op(f)(self, *new_args)
return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
"""common ops mixin to support a unified interface / docs for Series / Index
It assumes the following attributes, properties, and functions are defined.
"""
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@property
@abstractmethod
def _psdf(self) -> DataFrame:
pass
@abstractmethod
def _with_new_scol(
self: IndexOpsLike, scol: Column, *, field: Optional[InternalField] = None
) -> IndexOpsLike:
pass
@property
@abstractmethod
def _column_label(self) -> Optional[Label]:
pass
@property
@abstractmethod
def spark(self: IndexOpsLike) -> SparkIndexOpsMethods[IndexOpsLike]:
pass
@property
def _dtype_op(self) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.base import DataTypeOps
return DataTypeOps(self.dtype, self.spark.data_type)
@abstractmethod
def copy(self: IndexOpsLike) -> IndexOpsLike:
pass
# arithmetic operators
def __neg__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(Column.__neg__)(self))
def __add__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.add(self, other)
def __sub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.sub(self, other)
def __mul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mul(self, other)
def __truediv__(self, other: Any) -> SeriesOrIndex:
"""
__truediv__ has different behaviour between pandas and PySpark for several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-----------------------+---------+---------+
| dividend (divisor: 0) | PySpark | pandas  |
+-----------------------+---------+---------+
| np.inf                | null    | np.inf  |
| -np.inf               | null    | -np.inf |
| 10                    | null    | np.inf  |
| -10                   | null    | -np.inf |
+-----------------------+---------+---------+
"""
return self._dtype_op.truediv(self, other)
def __mod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mod(self, other)
def __radd__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.radd(self, other)
def __rsub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rsub(self, other)
def __rmul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmul(self, other)
def __rtruediv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rtruediv(self, other)
def __floordiv__(self, other: Any) -> SeriesOrIndex:
"""
__floordiv__ has different behaviour between pandas and PySpark for several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-----------------------+---------+---------+
| dividend (divisor: 0) | PySpark | pandas  |
+-----------------------+---------+---------+
| np.inf                | null    | np.inf  |
| -np.inf               | null    | -np.inf |
| 10                    | null    | np.inf  |
| -10                   | null    | -np.inf |
+-----------------------+---------+---------+
"""
return self._dtype_op.floordiv(self, other)
def __rfloordiv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rfloordiv(self, other)
def __rmod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmod(self, other)
def __pow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.pow(self, other)
def __rpow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rpow(self, other)
def __abs__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(F.abs)(self))
# comparison operators
def __eq__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return column_op(Column.__eq__)(self, other)
def __ne__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return column_op(Column.__ne__)(self, other)
__lt__ = column_op(Column.__lt__)
__le__ = column_op(Column.__le__)
__ge__ = column_op(Column.__ge__)
__gt__ = column_op(Column.__gt__)
def __invert__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(Column.__invert__)(self))
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
def __and__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__and__(self, other)
def __or__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__or__(self, other)
def __rand__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rand(self, other)
def __ror__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ror(self, other)
def __len__(self) -> int:
return len(self._psdf)
# NDArray Compat
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
) -> SeriesOrIndex:
from pyspark.pandas import numpy_compat
# Try dunder methods first.
result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
# After that, we try with PySpark APIs.
if result is NotImplemented:
result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return cast(SeriesOrIndex, result)
else:
# TODO: support more APIs?
raise NotImplementedError(
"pandas-on-Spark objects currently do not support %s." % ufunc
)
@property
def dtype(self) -> Dtype:
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ps.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ps.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
>>> s.rename("a").to_frame().set_index("a").index.dtype
dtype('<M8[ns]')
"""
return self._internal.data_fields[0].dtype
@property
def empty(self) -> bool:
"""
Returns true if the current object is empty. Otherwise, returns false.
>>> ps.range(10).id.empty
False
>>> ps.range(0).id.empty
True
>>> ps.DataFrame({}, index=list('abc')).index.empty
False
"""
return self._internal.resolved_copy.spark_frame.rdd.isEmpty()
@property
def hasnans(self) -> bool:
"""
Return True if it has any missing values. Otherwise, it returns False.
>>> ps.DataFrame({}, index=list('abc')).index.hasnans
False
>>> ps.Series(['a', None]).hasnans
True
>>> ps.Series([1.0, 2.0, np.nan]).hasnans
True
>>> ps.Series([1, 2, 3]).hasnans
False
>>> (ps.Series([1.0, 2.0, np.nan]) + 1).hasnans
True
>>> ps.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
False
"""
sdf = self._internal.spark_frame
scol = self.spark.column
if isinstance(self.spark.data_type, (DoubleType, FloatType)):
return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]
else:
return sdf.select(F.max(scol.isNull())).collect()[0][0]
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are monotonically increasing.
.. note:: the current implementation of is_monotonic requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are
transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
>>> ser.is_monotonic
True
>>> df = ps.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
>>> df.dates.is_monotonic
False
>>> df.index.is_monotonic
True
>>> ser = ps.Series([1])
>>> ser.is_monotonic
True
>>> ser = ps.Series([])
>>> ser.is_monotonic
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic
False
>>> ser.index.is_monotonic
True
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic
True
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'a'),
('z', 'b'),
('y', 'c'),
('y', 'd'),
('x', 'e')],
)
>>> midx.is_monotonic
False
"""
return self._is_monotonic("increasing")
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are monotonically decreasing.
.. note:: the current implementation of is_monotonic_decreasing requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are transferred
to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
>>> ser.is_monotonic_decreasing
True
>>> df = ps.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
>>> df.dates.is_monotonic_decreasing
False
>>> df.index.is_monotonic_decreasing
False
>>> ser = ps.Series([1])
>>> ser.is_monotonic_decreasing
True
>>> ser = ps.Series([])
>>> ser.is_monotonic_decreasing
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic_decreasing
True
>>> ser.index.is_monotonic_decreasing
False
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic_decreasing
False
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'e'),
('z', 'd'),
('y', 'c'),
('y', 'b'),
('x', 'a')],
)
>>> midx.is_monotonic_decreasing
True
"""
return self._is_monotonic("decreasing")
def _is_locally_monotonic_spark_column(self, order: str) -> Column:
window = (
Window.partitionBy(F.col("__partition_id"))
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-1, -1)
)
if order == "increasing":
return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
else:
return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
def _is_monotonic(self, order: str) -> bool:
assert order in ("increasing", "decreasing")
sdf = self._internal.spark_frame
sdf = (
sdf.select(
F.spark_partition_id().alias(
"__partition_id"
), # Make sure we use the same partition id in the whole job.
F.col(NATURAL_ORDER_COLUMN_NAME),
self.spark.column.alias("__origin"),
)
.select(
F.col("__partition_id"),
F.col("__origin"),
self._is_locally_monotonic_spark_column(order).alias(
"__comparison_within_partition"
),
)
.groupby(F.col("__partition_id"))
.agg(
F.min(F.col("__origin")).alias("__partition_min"),
F.max(F.col("__origin")).alias("__partition_max"),
F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True))).alias(
"__comparison_within_partition"
),
)
)
# Now we're windowing the aggregation results without partition specification.
# The number of rows here will be the same as the number of partitions, which is expected
# to be small.
window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
if order == "increasing":
comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
window
)
else:
comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
window
)
sdf = sdf.select(
comparison_col.alias("__comparison_between_partitions"),
F.col("__comparison_within_partition"),
)
ret = sdf.select(
F.min(F.coalesce(F.col("__comparison_between_partitions"), SF.lit(True)))
& F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True)))
).collect()[0][0]
if ret is None:
return True
else:
return ret
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
Return 1 for Series / Index / MultiIndex.
Examples
--------
For Series
>>> s = ps.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
>>> s.ndim
1
For Index
>>> s.index.ndim
1
For MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index.ndim
1
"""
return 1
def astype(self: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ps.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
>>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
Int64Index([1, 2], dtype='int64', name='a')
"""
return self._dtype_op.astype(self, dtype)
def isin(self: IndexOpsLike, values: Sequence[Any]) -> IndexOpsLike:
"""
Check whether `values` are contained in Series or Index.
Return a boolean Series or Index showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test.
Returns
-------
isin : Series (bool dtype) or Index (bool dtype)
Examples
--------
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
>>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
Index([True, False, True, False, True, False], dtype='object', name='a')
"""
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
)
values = values.tolist() if isinstance(values, np.ndarray) else list(values)
return self._with_new_scol(self.spark.column.isin([SF.lit(v) for v in values]))
def isnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.isna()
Index([False, False, True], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
return self._dtype_op.isnull(self)
isna = isnull
def notnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.notna()
Index([True, True, False], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("notna is not defined for MultiIndex")
return (~self.isnull()).rename(self.name) # type: ignore
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Axis = 0) -> bool:
"""
Return whether all elements are True.
        Returns True unless there is at least one element within a series that is
        False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([True, True]).all()
True
>>> ps.Series([True, False]).all()
False
>>> ps.Series([0, 1]).all()
False
>>> ps.Series([1, 2, 3]).all()
True
>>> ps.Series([True, True, None]).all()
True
>>> ps.Series([True, False, None]).all()
False
>>> ps.Series([]).all()
True
>>> ps.Series([np.nan]).all()
True
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.all()
False
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use min as its alternative:
ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), SF.lit(True)))).collect()[0][0]
if ret is None:
return True
else:
return ret
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Axis = 0) -> bool:
"""
Return whether any element is True.
        Returns False unless there is at least one element within a series that is
        True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([False, False]).any()
False
>>> ps.Series([True, False]).any()
True
>>> ps.Series([0, 0]).any()
False
>>> ps.Series([0, 1, 2]).any()
True
>>> ps.Series([False, False, None]).any()
False
>>> ps.Series([True, False, None]).any()
True
>>> ps.Series([]).any()
False
>>> ps.Series([np.nan]).any()
False
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.any()
True
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use max as its alternative:
ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), SF.lit(False)))).collect()[0][0]
if ret is None:
return False
else:
return ret
# TODO: add frep and axis parameter
def shift(
self: IndexOpsLike, periods: int = 1, fill_value: Optional[Any] = None
) -> IndexOpsLike:
"""
Shift Series/Index by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
            specifying partition specification. This moves all the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input Series/Index, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.Col1.shift(periods=3)
0 NaN
1 NaN
2 NaN
3 10.0
4 20.0
Name: Col1, dtype: float64
>>> df.Col2.shift(periods=3, fill_value=0)
0 0
1 0
2 0
3 13
4 23
Name: Col2, dtype: int64
>>> df.index.shift(periods=3, fill_value=0)
Int64Index([0, 0, 0, 0, 1], dtype='int64')
"""
return self._shift(periods, fill_value).spark.analyzed
def _shift(
self: IndexOpsLike,
periods: int,
fill_value: Any,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> IndexOpsLike:
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
col = self.spark.column
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
lag_col = F.lag(col, periods).over(window)
col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
return self._with_new_scol(col, field=self._internal.data_fields[0].copy(nullable=True))
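    # _shift relies on F.lag over a window ordered by the natural-order column;
    # boundary rows whose lagged value is null/NaN are replaced with `fill_value`.
    # As noted in shift's docstring, no partition specification is given, so the
    # data is moved into a single partition for this operation.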
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins: None = None,
dropna: bool = True,
) -> "Series":
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
For Series
>>> df = ps.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
For Index
>>> idx = ps.Index([3, 1, 2, 3, 4, np.nan])
>>> idx
Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')
>>> idx.value_counts().sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**sort**
        With `sort` set to `False`, the result is not sorted by count.
>>> idx.value_counts(sort=True).sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**normalize**
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> idx.value_counts(normalize=True).sort_index()
1.0 0.2
2.0 0.2
3.0 0.4
4.0 0.2
dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP
1.0 1
2.0 1
3.0 2
4.0 1
NaN 1
dtype: int64
For MultiIndex.
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index # doctest: +SKIP
MultiIndex([( 'lama', 'weight'),
( 'lama', 'weight'),
( 'lama', 'weight'),
( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'weight'),
('falcon', 'length'),
('falcon', 'length')],
)
>>> s.index.value_counts().sort_index()
(cow, length) 1
(cow, weight) 2
(falcon, length) 2
(falcon, weight) 1
(lama, weight) 3
dtype: int64
>>> s.index.value_counts(normalize=True).sort_index()
(cow, length) 0.111111
(cow, weight) 0.222222
(falcon, length) 0.222222
(falcon, weight) 0.111111
(lama, weight) 0.333333
dtype: float64
        If the Index has a name, the name is kept.
>>> idx = ps.Index([0, 0, 0, 1, 1, 2, 3], name='pandas-on-Spark')
>>> idx.value_counts().sort_index()
0 3
1 2
2 1
3 1
Name: pandas-on-Spark, dtype: int64
"""
from pyspark.pandas.series import first_series
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
else:
sdf_dropna = self._internal.spark_frame.select(self.spark.column)
index_name = SPARK_DEFAULT_INDEX_NAME
column_name = self._internal.data_spark_column_names[0]
sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col("count"))
else:
sdf = sdf.orderBy(F.col("count").desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn("count", F.col("count") / SF.lit(sum))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, index_name)],
column_labels=self._internal.column_labels,
data_spark_columns=[scol_for(sdf, "count")],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
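    # Implementation note: value_counts is a groupBy().count() over the column
    # (after an optional dropna), with optional ordering by the count column and
    # optional normalization by the total number of remaining rows.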
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
            If False, will use the exact algorithm and return the exact number of unique values.
            If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
            for large amounts of data.
Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> ps.Series([1, 2, 3, np.nan]).nunique()
3
>>> ps.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ps.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
>>> idx = ps.Index([1, 1, 2, None])
>>> idx
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')
>>> idx.nunique()
2
>>> idx.nunique(dropna=False)
3
"""
res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
return res.collect()[0][0]
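    # _nunique builds a single Spark Column: countDistinct (or
    # approx_count_distinct when approx=True), plus one extra count when
    # dropna=False and the column contains nulls, because countDistinct
    # ignores nulls.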
def _nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> Column:
colname = self._internal.data_spark_column_names[0]
count_fn = cast(
Callable[[Column], Column],
partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct,
)
if dropna:
return count_fn(self.spark.column).alias(colname)
else:
return (
count_fn(self.spark.column)
+ F.when(
F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1
).otherwise(0)
).alias(colname)
def take(self: IndexOpsLike, indices: Sequence[int]) -> IndexOpsLike:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
Series
>>> psser = ps.Series([100, 200, 300, 400, 500])
>>> psser
0 100
1 200
2 300
3 400
4 500
dtype: int64
>>> psser.take([0, 2, 4]).sort_index()
0 100
2 300
4 500
dtype: int64
Index
>>> psidx = ps.Index([100, 200, 300, 400, 500])
>>> psidx
Int64Index([100, 200, 300, 400, 500], dtype='int64')
>>> psidx.take([0, 2, 4]).sort_values()
Int64Index([100, 300, 500], dtype='int64')
MultiIndex
>>> psmidx = ps.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")])
>>> psmidx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('x', 'c')],
)
>>> psmidx.take([0, 2]) # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'c')],
)
"""
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if isinstance(self, ps.Series):
return cast(IndexOpsLike, self.iloc[indices])
else:
return cast(IndexOpsLike, self._psdf.iloc[indices].index)
def factorize(
self: IndexOpsLike, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple[IndexOpsLike, pd.Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values.
Parameters
----------
sort : bool, default True
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
Returns
-------
codes : Series or Index
A Series or Index that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : pd.Index
The unique valid values.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
Examples
--------
>>> psser = ps.Series(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psser.factorize()
>>> codes
0 1
1 -1
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=None)
>>> codes
0 1
1 3
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c', None], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=-2)
>>> codes
0 1
1 -2
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
For Index:
>>> psidx = ps.Index(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psidx.factorize()
>>> codes
Int64Index([1, -1, 0, 2, 1], dtype='int64')
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
"""
from pyspark.pandas.series import first_series
assert (na_sentinel is None) or isinstance(na_sentinel, int)
assert sort is True
if isinstance(self.dtype, CategoricalDtype):
categories = self.dtype.categories
if len(categories) == 0:
scol = SF.lit(None)
else:
kvs = list(
chain(
*[
(SF.lit(code), SF.lit(category))
for code, category in enumerate(categories)
]
)
)
map_scol = F.create_map(*kvs)
scol = map_scol.getItem(self.spark.column)
codes, uniques = self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0])
).factorize(na_sentinel=na_sentinel)
return codes, uniques.astype(self.dtype)
uniq_sdf = self._internal.spark_frame.select(self.spark.column).distinct()
        # Check the number of uniques and construct the sorted `uniques_list`
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
uniq_pdf = uniq_sdf.limit(max_compute_count + 1).toPandas()
if len(uniq_pdf) > max_compute_count:
raise ValueError(
"Current Series has more then {0} unique values. "
"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
"to more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
else:
uniq_pdf = uniq_sdf.toPandas()
        # pandas maps both NaN and null from Spark to np.nan, so de-duplication is required
uniq_series = first_series(uniq_pdf).drop_duplicates()
uniques_list = uniq_series.tolist()
uniques_list = sorted(uniques_list, key=lambda x: (pd.isna(x), x))
        # Construct `unique_to_code`, mapping each non-NA unique value to its code
unique_to_code = {}
if na_sentinel is not None:
na_sentinel_code = na_sentinel
code = 0
for unique in uniques_list:
if pd.isna(unique):
if na_sentinel is None:
na_sentinel_code = code
else:
unique_to_code[unique] = code
code += 1
kvs = list(
chain(*([(SF.lit(unique), SF.lit(code)) for unique, code in unique_to_code.items()]))
)
if len(kvs) == 0: # uniques are all missing values
new_scol = SF.lit(na_sentinel_code)
else:
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
cond = scol.isNull()
map_scol = F.create_map(*kvs)
null_scol = F.when(cond, SF.lit(na_sentinel_code))
new_scol = null_scol.otherwise(map_scol.getItem(scol))
codes = self._with_new_scol(new_scol.alias(self._internal.data_spark_column_names[0]))
if na_sentinel is not None:
# Drops the NaN from the uniques of the values
uniques_list = [x for x in uniques_list if not pd.isna(x)]
uniques = pd.Index(uniques_list)
return codes, uniques
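    # Summary of the non-categorical path above: the distinct values are pulled
    # to the driver (bounded by the "compute.max_rows" option), sorted with
    # missing values last, and mapped to integer codes through a Spark map
    # expression; missing values receive `na_sentinel` as their code.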
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.base
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.base.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.base tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.base,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
moutai/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
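# Because each row is divided by its class support, the diagonal of
# cm_normalized gives the per-class recall (fraction of each true class that
# was predicted correctly).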
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
sppalkia/weld | python/grizzly/grizzly/seriesweld.py | 3 | 19541 | import pandas as pd
import grizzly_impl
from lazy_op import LazyOpResult, to_weld_type
from weld.weldobject import *
import utils
class SeriesWeld(LazyOpResult):
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None, index_type=None, index_name=None):
"""Summary
TODO: Implement an actual Index Object like how Pandas does
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
self.index_type = index_type
self.index_name = index_name
def __getitem__(self, key):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(key, slice):
start = key.start
# TODO : We currently do nothing with step
step = key.step
stop = key.stop
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
sliced_expr = grizzly_impl.slice_vec(zip_expr, start, stop)
unzip_expr = grizzly_impl.unzip_columns(
sliced_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
return SeriesWeld(
grizzly_impl.slice_vec(
self.expr,
start,
stop
)
)
else:
# By default we return as if the key were predicates to filter by
return self.filter(key)
def __setitem__(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if self.df is not None and self.column_name is not None:
self.df[self.column_name] = self.mask(predicates, new_value)
@property
def loc(self):
return WeldLocIndexer(
self
)
def __getattr__(self, key):
"""Summary
Args:
key (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
"""
if key == 'str' and self.weld_type == WeldVec(WeldChar()):
return StringSeriesWeld(
self.expr,
self.weld_type,
self.df,
self.column_name
)
raise AttributeError("Attr %s does not exist" % key)
@property
def index(self):
if self.index_type is not None:
return SeriesWeld(
grizzly_impl.get_field(
self.expr,
0
),
self.index_type,
self.df,
self.index_name
)
# TODO : Make all series have a series attribute
raise Exception("No index present")
def evaluate(self, verbose=False, passes=None):
if self.index_type is not None:
index, column = LazyOpResult(
self.expr,
WeldStruct([WeldVec(self.index_type), WeldVec(self.weld_type)]),
0
).evaluate(verbose=verbose, passes=passes)
series = pd.Series(column, index)
series.index.rename(self.index_name, True)
return series
else:
column = LazyOpResult.evaluate(self, verbose=verbose, passes=passes)
return pd.Series(column)
def sort_values(self, ascending=False):
""" Sorts the values of this series
"""
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
result_expr = grizzly_impl.sort(zip_expr, 1, self.weld_type, ascending)
unzip_expr = grizzly_impl.unzip_columns(
result_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
result_expr = grizzly_impl.sort(self.expr)
# TODO need to finish this
def unique(self):
"""Summary
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.unique(
self.expr,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def lower(self):
"""Summary
Returns:
TYPE: Description
"""
# TODO : Bug in nested map operating on strings
# TODO : Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.to_lower(
self.expr,
elem_type
),
self.weld_type,
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
def isin(self, ls):
if isinstance(ls, SeriesWeld):
if self.weld_type == ls.weld_type:
return SeriesWeld(
grizzly_impl.isin(self.expr,
ls.expr,
self.weld_type),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call isin on different typed list")
def prod(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"*",
1,
self.weld_type
),
self.weld_type,
0
)
def sum(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"+",
0,
self.weld_type
),
self.weld_type,
0
)
def max(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def min(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def count(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.count(
self.expr,
self.weld_type
),
WeldInt(),
0
)
def mask(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.mask(
self.expr,
predicates,
new_value,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def filter(self, predicates):
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.filter(
self.expr,
predicates,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def add(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"+",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __sub__(self, other):
        # TODO: subtraction without index variables
if self.index_type is not None:
index = grizzly_impl.get_field(self.expr, 0)
expr1 = grizzly_impl.get_field(self.expr, 1)
else:
expr1 = self.expr
if other.index_type is not None:
index2 = grizzly_impl.get_field(other.expr, 0)
expr2 = grizzly_impl.get_field(other.expr, 1)
else:
expr2 = other.expr
index_expr = LazyOpResult(index, self.index_type, 0)
sub_expr = SeriesWeld(
grizzly_impl.element_wise_op(
expr1,
expr2,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
index_sub_expr = utils.group([index_expr, sub_expr])
return SeriesWeld(
index_sub_expr.expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
# We also need to ensure that both indexes of the subtracted
# columns are compatible
def sub(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mul(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"*",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def div(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"/",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def per_element_and(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"&&",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mod(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"%",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __eq__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"==",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ne__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"!=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __gt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
">",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ge__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if self.index_type is not None:
expr = grizzly_impl.get_field(self.expr, 1)
else:
expr = self.expr
return SeriesWeld(
grizzly_impl.compare(
expr,
other,
">=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __lt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __le__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
class StringSeriesWeld:
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None):
"""Summary
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
def slice(self, start, size):
"""Summary
Args:
start (TYPE): Description
size (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.slice(
self.expr,
start,
size,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
class WeldLocIndexer:
"""
Label location based indexer for selection by label for Series objects.
Attributes:
grizzly_obj (TYPE): The Series being indexed into.
"""
def __init__(self, grizzly_obj):
# If index_type field of grizzly_obj is None
# then we assume normal 0 - 1 indexing
self.grizzly_obj = grizzly_obj
def __getitem__(self, key):
if isinstance(self.grizzly_obj, SeriesWeld):
series = self.grizzly_obj
if isinstance(key, SeriesWeld):
if series.index_type is not None:
index_expr = grizzly_impl.get_field(series.expr, 0)
column_expr = grizzly_impl.get_field(series.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
predicate_expr = grizzly_impl.isin(index_expr, key.expr, series.index_type)
filtered_expr = grizzly_impl.filter(
zip_expr,
predicate_expr
)
unzip_expr = grizzly_impl.unzip_columns(
filtered_expr,
[series.index_type, series.weld_type]
)
return SeriesWeld(
unzip_expr,
series.weld_type,
series.df,
series.column_name,
series.index_type,
series.index_name
)
# TODO : Need to implement for non-pivot tables
raise Exception("Cannot invoke getitem on non SeriesWeld object")
| bsd-3-clause |
PMitura/smiles-neural-network | baselines/sicho_svm_uni_feature_sel.py | 1 | 8481 | #! /usr/bin/env python
import db
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Descriptors
from sklearn.feature_selection import VarianceThreshold
import pylab
data = db.getTarget_206_1977()
duplicates = {}
for datum in data:
if datum[0] in duplicates:
duplicates[datum[0]].append(datum[1])
else:
duplicates[datum[0]] = [datum[1]]
new_data = []
for smile, sval_arr in duplicates.iteritems():
lemin = np.amin(sval_arr)
lemax = np.amax(sval_arr)
if len(sval_arr) == 1:
new_data.append([smile,sval_arr[0]])
elif lemin != 0 and lemax != 0:
if not (len(sval_arr) < 20 and int(math.log(lemin, 10)) != int(math.log(lemax, 10))):
new_data.append([smile,np.median(sval_arr)])
data = new_data
df_data = {}
df_data['smiles'] = []
df_data['sval'] = []
df_reorder = ['smiles','sval']
for name, function in Descriptors.descList:
df_data[name] = []
df_reorder.append(name)
for i in range(len(data)):
smiles = data[i][0]
sval = data[i][1]
mol = Chem.MolFromSmiles(smiles)
for name, function in Descriptors.descList:
df_data[name].append(function(mol))
df_data['smiles'].append(smiles)
df_data['sval'].append(sval)
# create dataframe, reorder values so that smiles is first, sval is second
df = pd.DataFrame(df_data)
df = df[df_reorder]
df.set_index('smiles', inplace=True)
# we convert the IC50 values to pIC50
df.sval = df.sval.apply(lambda x : -1.0 * np.log10(x / 1.0e9))
# drop infinite values
df = df.drop(df[df.sval == np.inf].index)
def get_removed_feats(df, model):
return df.columns.values[1:][~model.get_support()]
def update_df(df, removed_descriptors, inplace=True):
if inplace:
df.drop(removed_descriptors, 1, inplace=True)
# print(df.shape)
return df
else:
new_df = df.drop(removed_descriptors, 1, inplace=False)
# print(new_df.shape)
return new_df
# find the names of the columns with zero variance
var_sel = VarianceThreshold()
var_sel.fit(df.iloc[:,1:])
removed_descriptors = get_removed_feats(df, var_sel)
# update the data frame
update_df(df, removed_descriptors)
# correlation filter
def find_correlated(data):
correlation_matrix = data.iloc[:,1:].corr(method='spearman')
removed_descs = set()
all_descs = correlation_matrix.columns.values
for label in all_descs:
if label not in removed_descs:
correlations_abs = correlation_matrix[label].abs()
mask = (correlations_abs > 0.7).values
to_remove = set(all_descs[mask])
to_remove.remove(label)
removed_descs.update(to_remove)
return removed_descs
update_df(df, find_correlated(df))
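# find_correlated keeps the first descriptor of each correlated group
# (pairwise |Spearman rho| > 0.7) and returns the remaining members of the
# group for removal.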
# regression tests
from sklearn.feature_selection import SelectPercentile,SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
# keep only the descriptors that show significant
# correlation with the target variable (pIC50)
regre_sele = SelectPercentile(f_regression, percentile=50)
regre_sele.fit(df.iloc[:,1:], df.sval)
removed_descriptors = get_removed_feats(df, regre_sele)
# update the data frame
update_df(df, removed_descriptors)
# print selected features
print(df.columns.tolist())
from sklearn.metrics import mean_squared_error
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
def createModelAndMeasure(df):
scaler = StandardScaler(copy=False)
scaler.fit(df.iloc[:,1:])
scaled_features = pd.DataFrame(scaler.transform(df.iloc[:,1:]), columns=df.iloc[:,1:].columns)
# Next, we create the datasets for cross-validation and testing:
features_train, features_test, sval_train, sval_test = train_test_split(
scaled_features
, df.sval
, test_size=0.4
, random_state=42
)
# and build the model:
param_grid = [
# {'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'kernel': ['linear']},
# {'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'kernel': ['poly'], 'degree' : [2, 3, 4, 5]},
{'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'gamma': [0.01, 0.001, 0.0001], 'kernel': ['rbf']},
]
model = GridSearchCV(SVR(), param_grid, n_jobs=2, cv=5)
model.fit(features_train, sval_train)
model = model.best_estimator_
# print('Model params:')
# print(model.get_params())
# print()
# cross validation results
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(model, features_train, sval_train, cv=5)
scores_mse = cross_val_score(model, features_train, sval_train, cv=5, scoring='mean_squared_error')
results = {
'cv_mse': abs(scores_mse.mean()),
'cv_mse_d': scores_mse.std() * 2,
'v_mse': mean_squared_error(model.predict(features_test), sval_test)
}
return results
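    # Note: only the RBF-kernel grid is active above (the linear and polynomial
    # grids are commented out). The MSE scores are reported as absolute values
    # because this version of scikit-learn's 'mean_squared_error' scorer
    # returns negated values.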
# print('Cross validation:')
# print("Mean R^2: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# print("Mean R: %0.2f (+/- %0.2f)" % (np.sqrt(scores).mean(), np.sqrt(scores).std() * 2))
# print("Mean MSE: %0.2f (+/- %0.2f)" % (abs(scores_mse.mean()), scores_mse.std() * 2))
# print()
# test validation results
# print('Test validation:')
# print("R^2: %0.2f" % model.score(features_test, sval_test))
# print("R: %0.2f" % np.sqrt(model.score(features_test, sval_test)))
# print("MSE: %0.2f" % mean_squared_error(model.predict(features_test), sval_test))
# print()
######################################################################
features_count = len(df.columns)-1
print(features_count)
results = []
# Further forward selection of k best
for fw_step in range(features_count,0,-1):
print('Selecting %d/%d features:' % (fw_step,features_count))
selector = SelectKBest(f_regression,k=fw_step)
selector.fit(df.iloc[:,1:], df.sval)
removed_descriptors = get_removed_feats(df, selector)
# update the data frame
new_df = update_df(df, removed_descriptors, inplace=False)
print(new_df.columns.tolist())
res = createModelAndMeasure(new_df)
res['k']=fw_step
results.append(res)
resultsdf = pd.DataFrame(results)
resultsdf.set_index('k',inplace=True)
print(resultsdf)
resultsdf.plot()
plt.show()
# print(features_mean.values)
# scaled_features = pd.DataFrame(((df.iloc[:,1:]-features_mean)/features_std).values, columns=df.iloc[:,1:].columns)
# print(scaled_features)
# error plot
# (sval_test - model.predict(features_test)).abs().hist(bins=30).plot()
# plt.show()
# PCA plot
print("Computing decomposition:")
from sklearn import decomposition
'''
n_components = 5
pca = decomposition.PCA(n_components=n_components)
pca.fit(df.iloc[:,1:])
pca_result = pca.transform(df.iloc[:,1:])
from mpl_toolkits.mplot3d import Axes3D
from itertools import combinations
plt.rcParams["figure.figsize"] = [15, 15]
fig = plt.figure()
ax = fig.add_subplot(1,1,1,projection='3d')
PCAcombo = [3,1,0]
ax.scatter(
pca_result[:,PCAcombo[0]]
, pca_result[:,PCAcombo[1]]
, pca_result[:,PCAcombo[2]]
, c=df.sval
, cmap='YlOrRd'
)
ax.view_init(elev=30, azim=45)
ax.set_xlabel('PC%s' % (PCAcombo[0] + 1))
ax.set_ylabel('PC%s' % (PCAcombo[1] + 1))
ax.set_zlabel('PC%s' % (PCAcombo[2] + 1))
plt.show()
'''
'''
combos = list(combinations(range(n_components), 3))
plt.rcParams["figure.figsize"] = [15, 30]
fig = plt.figure(len(combos) / 2)
for idx, combo in enumerate(combos):
ax = fig.add_subplot(len(combos) / 2, 2, idx + 1, projection='3d')
ax.scatter(
pca_result[:,combo[0]]
, pca_result[:,combo[1]]
, pca_result[:,combo[2]]
, c=df.sval
, s=20
, cmap='YlOrRd' # red are the compounds with higher values of pIC50
)
ax.view_init(elev=30, azim=45)
ax.set_xlabel('PC%s' % (combo[0] + 1))
ax.set_ylabel('PC%s' % (combo[1] + 1))
ax.set_zlabel('PC%s' % (combo[2] + 1))
plt.show()
'''
'''
from sklearn.manifold import TSNE
model = TSNE(n_components=2)
TSNEdata = model.fit_transform(df.iloc[:,1:])
TSNEdf = pd.DataFrame(TSNEdata, columns=('x','y'))
TSNEdf['c'] = pd.Series(df.sval.values,index=TSNEdf.index)
plot = TSNEdf.plot.scatter(x = 'x', y = 'y', c = 'c', cmap = 'plasma')
plt.show()
'''
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/multi/test_drop.py | 2 | 4428 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(index)
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(["foo", "two"])
# partially correct argument
mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(mixed_index)
# error='ignore'
dropped = idx.drop(index, errors="ignore")
expected = idx[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(["foo", "two"], errors="ignore")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = idx.drop(["foo", ("qux", "one")])
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ["foo", ("qux", "one"), "two"]
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(mixed_index)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(idx):
index = idx[idx.get_loc("foo")]
dropped = index.droplevel(0)
assert dropped.name == "second"
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index.droplevel(0)
assert dropped.names == ("two", "three")
dropped = index.droplevel("two")
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list():
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index[:2].droplevel(["three", "one"])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
msg = (
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left"
)
with pytest.raises(ValueError, match=msg):
index[:2].droplevel(["one", "two", "three"])
with pytest.raises(KeyError, match="'Level four not found'"):
index[:2].droplevel(["one", "four"])
def test_drop_not_lexsorted():
# GH 12078
# define the lexsorted version of the multi-index
tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
df = df.pivot_table(index="a", columns=["b", "c"], values="d")
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a"))
| bsd-3-clause |
evgchz/scikit-learn | sklearn/neural_network/rbm.py | 15 | 11957 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
        visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
rng = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, rng)
v_ = self._sample_visibles(h_, rng)
return v_
def partial_fit(self, X):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
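    # Persistent Contrastive Divergence: the Gibbs chain state is kept in
    # self.h_samples_ across minibatches instead of being re-initialized from
    # the data, which is what the "persistent" in PCD refers to.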
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
KeithYue/StockTrading | utility.py | 1 | 4059 | # coding=utf-8
# utility function of stock
import os
import pandas as pd
import numpy as np
import logging
# config the logging system
logging.basicConfig(level=logging.DEBUG)
# define Stock class
class Stock():
'''
the stock class
'''
def __init__(self, code):
self.code=code
# search and load the stock data
try:
for f in os.listdir('./data'):
if f.startswith(self.code):
self.path = './data/{}'.format(f)
data = pd.read_csv(self.path, parse_dates=['Date'])
data.rename(columns=str.lower, inplace=True)
except Exception as e:
logging.error(e)
self.data=data # the raw dataframe data
# logging.info(self.data)
return
def __str__(self):
return self.code
# modify the DataFrame class to convert dict-like data for talib compudation
def tt(self):
'''
convert DataFrame to talib computation data
tt means to talib
return dict-like data
'''
d = {}
for c in self.columns:
d[c] = np.asarray(self[c])
return d
pd.DataFrame.tt = tt
def gold_cross_number(s):
'''
given a time series, return the count of crossing
'''
count = 0
for i in range(0, len(s)-1):
if s[i] < 0 and s[i+1]>0:
count = count + 1
return count
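# Worked example (hypothetical values): gold_cross_number([-1, 2, -3, 4, 5])
# finds two negative-to-positive crossings (between indices 0-1 and 2-3) and
# returns 2.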
def send_email(body):
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
user = "comp5331.ust@gmail.com"
pwd = "keith5805880"
recipient = "ywangby@connect.ust.hk"
subject = "From Keith: Promising Stock Codes"
msg = MIMEText(body, 'html')
msg['Subject'] = subject
msg['From'] = user
msg['To'] = recipient
try:
# server = smtplib.SMTP_SSL('smtp.gmail.com:465')
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, pwd)
server.send_message(msg)
server.quit()
print('successfully sent the mail')
except Exception as e :
print(e)
# smooth the timeseries function
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window should be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
    return y[(window_len//2-1):-(window_len//2)]  # integer division so the slice indices are ints under Python 3
def test():
s = Stock('0001')
print(s.data)
if __name__ == '__main__':
test()
| apache-2.0 |
Sentient07/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` classifier count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
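# note: the soft-vote probabilities of eclf are the weighted mean of the three
# individual classifiers, i.e. (1*p_lr + 1*p_rf + 5*p_gnb) / 7 for each class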
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'RandomForestClassifier\nweight 1',
                    'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
nkoukou/University_Projects_Year_3 | EDM_Assembly/base_class.py | 1 | 11366 | '''
Defines the base class of an electric potential grid.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from numba import jit
# Global dimensions (used for plots)
sqc_x = (2., 'cm') # unit length for SquareCable
sqc_u = (10., 'V') # unit potential for SquareCable
edm_x = (10., 'mm') # unit length for Edm
edm_u = (2., 'kV') # unit potential for Edm
# Plot parameters
font = {'family' : 'normal',
'weight' : 'normal'}
mpl.rc('font', **font)
mpl.rcParams['lines.linewidth'] = 5.
# Functions compiled just-in-time
@jit
def gen_sc_grid(b, t, u):
'''
Generates SquareCable grid.
'''
grid = np.full((b,b), u)
fix = np.ones((b,b))
grid = np.pad(grid, ((t-b)/2,), 'constant', constant_values=(0,))
fix = np.pad(fix, ((t-b)/2,), 'constant', constant_values=(0,))
grid = np.pad(grid, 1, 'constant', constant_values=(0,))
fix = np.pad(fix, 1, 'constant', constant_values=(1,))
return grid, fix
@jit
def gen_edm_grid(tube_dist, scale=1):
'''
Generates Edm grid.
'''
small_plate = np.full(2,1, dtype='float64')
big_plate = np.full(20,4, dtype='float64')
gap = np.zeros(1, dtype='float64')
row_one = np.concatenate((small_plate, gap, big_plate, gap, small_plate))
row_two = np.zeros(row_one.size)
row_three = -row_one
grid = np.vstack((row_one, row_two, row_three))
grid = np.pad(grid, tube_dist, 'constant', constant_values=(0,))
fix = np.where(grid==0, 0, 1)
if scale != 1:
scale = np.ones((scale, scale))
grid = np.kron(grid, scale)
fix = np.kron(fix, scale)
grid = np.pad(grid, 1, 'constant', constant_values=(0,))
fix = np.pad(fix, 1, 'constant', constant_values=(1,))
return grid, fix
@jit
def update(grid, fix, scale, w=-1):
'''
Updates SquareCable or Edm grid.
Relaxation parameter w (0 < w < 2) affects the speed of convergence.
- w = 'j': solves with Jacobi method
- w = -1: solves with estimated optimal w
'''
if w=='j' or w=='J':
new_grid=np.copy(grid)
for index, fixed in np.ndenumerate(fix):
if fixed: continue
new_grid[index] = 0.25*( grid[index[0]-1, index[1]] +
grid[index[0]+1, index[1]] +
grid[index[0], index[1]-1] +
grid[index[0], index[1]+1] )
return new_grid
if w==-1:
coef = float(grid.shape[1])/grid.shape[0]
const = 2.0 if coef==1. else 5.5
w = 2./(1+const/(coef*scale))
for index, fixed in np.ndenumerate(fix):
if fixed: continue
grid[index] = ((1-w) * grid[index] + 0.25 * w *
( grid[index[0]-1, index[1]] +
grid[index[0]+1, index[1]] +
grid[index[0], index[1]-1] +
grid[index[0], index[1]+1] ))
return grid
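# Illustrative sketch (not part of the assembly model below): a single Jacobi
# sweep on a tiny 4x4 grid whose border is held fixed at 0 and whose interior
# starts at 1; each free cell becomes the mean of its four neighbours.
def _example_jacobi_sweep():
    grid = np.zeros((4, 4))
    grid[1:3, 1:3] = 1.0
    fix = np.ones((4, 4))
    fix[1:3, 1:3] = 0  # only the interior is free to relax
    return update(grid, fix, 1, 'j')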
# Base class
class PotentialGrid(object):
def update_grid(self, w=-1):
'''
Updates grid once.
'''
self.grid = update(self.grid, self.fix, self.scale, w)
def converge_grid(self, w=-1, accuracy=0.05):
'''
Updates grid until convergence.
'''
temporal_spread = 1.
spatial_spread = 0.
updates = 0
while temporal_spread > accuracy*spatial_spread:
horizontal_spread = np.absolute(np.diff(self.grid, axis=-1)).max()
vertical_spread = np.absolute(np.diff(self.grid, axis=0)).max()
spatial_spread = max(horizontal_spread, vertical_spread)
old_grid = np.copy(self.grid)
self.update_grid(w)
temporal_spread = np.linalg.norm( (self.grid - old_grid) )
updates += 1
if updates%1000==0:
print '\nspatial spread = ', spatial_spread
print 'temporal spread = ', temporal_spread
print 'updates = ', updates
return temporal_spread, spatial_spread, updates
def plot_grid(self, title=None):
'''
Plots grid's potential field. Parameter title sets the title of the
plot.
'''
if self.grid.shape[0] == self.grid.shape[1]:
colour, shrink, aspect = 'YlOrRd', 1, (1, 10)
else:
colour, shrink, aspect = 'RdYlBu', 0.5, (1.2, 8)
grid = self.dim['u'][0]*self.grid
xedge = (grid.shape[1]-2.)*self.dim['x'][0]/self.scale/2.
yedge = (grid.shape[0]-2.)*self.dim['x'][0]/self.scale/2.
fig = plt.figure()
ax = fig.add_subplot(111)
if title=='intro':
ax.set_title(r'EDM experiment plate assembly', fontsize=45)
elif title=='results':
ax.set_title(r'Electric Potential Field', fontsize=45)
axx = ax.imshow(grid, extent= [-xedge, xedge, -yedge, yedge],
aspect=aspect[0], interpolation='None',
cmap=plt.cm.get_cmap(colour))
ax.set_xlabel(r'$system\ size\ ({0})$'.format(self.dim['x'][1]),
fontsize=45)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.tick_params(axis='both', labelsize=40)
cbar = fig.colorbar(axx, shrink=shrink, aspect=aspect[1])
cbar.ax.tick_params(labelsize=40)
cbar.set_label(r'$Potential\ \phi\ ({0})$'.format(self.dim['u'][1]),
size=50)
def analyse_scale(self, w=-1, datapoints=20, accuracy=0.05, plot=True):
'''
Plots number of updates against scale for given relaxation parameter w,
number of datapoints and accuracy of convergence. If plot=False,
returns computed updates and scales.
Plots also maximum spatial spread of potential against scale.
'''
scales = np.linspace(10, 10*datapoints, datapoints)
mesh, updates = [], []
for s in scales:
print s
self.set_scale(s, silent=True)
data = self.converge_grid(w, accuracy)
updates.append(data[2])
mesh.append(data[1]*self.dim['u'][0])
if not plot: return scales, updates
if w=='j':
xaxis = scales*scales
lab= r'$scale^2\ \left(\frac{1}{(%g%s)^2}\right)$'% (
self.dim['x'][0], self.dim['x'][1])
else:
xaxis = scales
lab= r'$scale\ \left(\frac{1}{%g%s}\right)$'% (self.dim['x'][0],
self.dim['x'][1])
slope = updates[-1]/xaxis[-1]
fit = slope*xaxis
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Number of updates against Scale', fontsize=45)
ax.plot(xaxis, updates, label=r'Numerical data')
ax.plot(xaxis, fit, label=r'Linear fit ($slope=%.2f$)'% (slope))
ax.set_xlabel(lab, fontsize=35)
ax.set_ylabel(r'$temporal\ updates$', fontsize=35)
ax.tick_params(axis='both', labelsize=25)
ax.legend(loc='upper left', prop={'size':40})
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Spatial spread against Scale', fontsize=45)
ax.plot(scales, mesh)
ax.set_xlabel(r'$scale\ \left(\frac{1}{%g%s}\right)$'%
(self.dim['x'][0], self.dim['x'][1]), fontsize=40)
ax.set_ylabel(r'$spatial\ spread\ (%s)$'% (self.dim['u'][1]),
fontsize=40)
ax.tick_params(axis='both', labelsize=25)
def analyse_spread(self, w=-1, datapoints=10):
'''
Plots spatial spread of potential against accuracy of convergence for
given relaxation parameter w and number of datapoints.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_title(r'Spatial spread against Accuracy of convergence',
# fontsize=75)
ax.set_xlabel(r'$fraction\ of\ spatial\ spread$', fontsize=40)
ax.invert_xaxis()
ax.set_ylabel(r'$spatial\ spread\ (%s)$'% (self.dim['u'][1]),
fontsize=40)
ax.tick_params(axis='both', labelsize=30)
accuracies = np.logspace(-1,-10,datapoints)
for scale in np.linspace(10,10*datapoints,datapoints):
self.set_scale(scale, silent=True)
spreads = []
for acc in accuracies:
t,s,u = self.converge_grid(w, acc)
spreads.append(s*self.dim['u'][0])
ax.plot(accuracies, spreads, label='Scale={0}'.format(scale))
return accuracies, spreads
def analyse_omega(self, guess, scales=-1, datapoints=20,
accuracy=0.05, plot=True):
'''
Plots number of updates against relaxation parameter for given initial
guess, system scales, number of datapoints and accuracy of convergence.
If plot=False, returns computed updates and relaxation parameters.
'''
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Optimal omega search at different scales',
fontsize=55)
ax.set_xlabel('$relaxation\ parameter\ \omega$', fontsize=37)
ax.set_ylabel('$temporal\ updates$', fontsize=37)
ax.tick_params(axis='both', labelsize=30)
ws = np.pad(np.array([guess]), datapoints/2, 'linear_ramp',
end_values=(guess-0.05, 1.99))
if scales==-1: scales = [self.scale]
for scale in scales:
updates = []
for w in ws:
self.set_scale(scale, silent=True)
data = self.converge_grid(w, accuracy)
updates.append(data[-1])
if plot: ax.plot(ws, updates, label=r'Scale ${0}$'.format(scale))
else: return ws, updates
if plot: ax.legend(loc='upper center', prop={'size':40})
def plot_omega_vs_scale(self, const=2., datapoints=20):
'''
Plots relaxation parameter against scale along with approximate fit for
given number of datapoints.
The fitting is approximated by the user with the constant const which
        appears in the formula: 2/(1+const/(coef*scale)), where coef is the
ratio of x and y dimensions of the system.
'''
coef = float(self.grid.shape[1]-2)/(self.grid.shape[0]-2)
scales = np.linspace(10, 50, datapoints)
fit = 2./(1+const/(coef*scales))
ws = []
for scale in scales:
self.set_scale(scale, silent=True)
guess = 2./(1+const/(coef*self.scale))
w, update = self.analyse_omega(guess, plot=False)
w = w[update.index(min(update))]
ws.append(w)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Relaxation parameter against scale', fontsize=55)
ax.set_xlabel(r'$scale\ \left(\frac{1}{%g%s}\right)$'%
(self.dim['x'][0], self.dim['x'][1]), fontsize=37)
ax.set_ylabel('$relaxation\ parameter\ \omega$', fontsize=37)
ax.tick_params(axis='both', labelsize=30)
ax.plot(scales, ws, label=r'Numerical data')
ax.plot(scales, fit, label=r'Approximate fit ($C=%.1f$)'% (const))
ax.legend(loc='upper left', prop={'size':40})
return scales, ws
| mit |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/frame/test_mutate_columns.py | 7 | 7831 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_index_equal(df.columns,
Index(['foo', 'c', 'bar', 'b', 'a']))
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
# GH 13522
df = DataFrame(index=['A', 'B', 'C'])
df['X'] = df.index
df['X'] = ['x', 'y', 'z']
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
# 10912
# inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
'A', 'B', 'C'], index=['X', 'Y'])
b = a.pop('B')
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=[
'A', 'C'], index=['X', 'Y'])
assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
columns=['c', 'a', 'b'])
assert_frame_equal(result, expected)
| mit |
bikong2/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
evidation-health/pymc3 | setup.py | 1 | 2670 | #!/usr/bin/env python
from setuptools import setup
import sys
DISTNAME = 'pymc3'
DESCRIPTION = "PyMC3"
LONG_DESCRIPTION = """Bayesian estimation, particularly using Markov chain Monte Carlo (MCMC), is an increasingly relevant approach to statistical estimation. However, few statistical software packages implement MCMC samplers, and they are non-trivial to code by hand. ``pymc3`` is a python package that implements the Metropolis-Hastings algorithm as a python class, and is extremely flexible and applicable to a large suite of problems. ``pymc3`` includes methods for summarizing output, plotting, goodness-of-fit and convergence diagnostics."""
MAINTAINER = 'John Salvatier'
MAINTAINER_EMAIL = 'jsalvati@u.washington.edu'
AUTHOR = 'John Salvatier and Christopher Fonnesbeck'
AUTHOR_EMAIL = 'chris.fonnesbeck@vanderbilt.edu'
URL = "http://github.com/pymc-devs/pymc"
LICENSE = "Apache License, Version 2.0"
VERSION = "3.0"
classifiers = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent']
install_reqs = ['numpy>=1.7.1', 'scipy>=0.12.0', 'matplotlib>=1.2.1',
'Theano<=0.7.1dev', 'pandas>=0.15.0']
if sys.version_info < (3, 4):
install_reqs.append('enum34')
test_reqs = ['nose']
if sys.version_info[0] == 2: # py3 has mock in stdlib
test_reqs.append('mock')
dep_links = ['https://github.com/Theano/Theano/tarball/master#egg=Theano-0.7.1dev']
if __name__ == "__main__":
setup(name=DISTNAME,
version=VERSION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
packages=['pymc3', 'pymc3.distributions',
'pymc3.step_methods', 'pymc3.tuning',
'pymc3.tests', 'pymc3.glm', 'pymc3.examples',
'pymc3.backends'],
package_data = {'pymc3.examples': ['data/*.*']},
classifiers=classifiers,
install_requires=install_reqs,
dependency_links=dep_links,
tests_require=test_reqs,
test_suite='nose.collector')
| apache-2.0 |
cgrohman/ponies | hypo1.py | 1 | 4172 | import numpy as np
import pandas as pd
from horse import Horse
from race import Race
import pdb
from sklearn.preprocessing import Imputer, StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
#------------------------------------------------------------------------------
def main():
dataset = pd.read_csv('results/ml_2017-04-17.csv')
#data clean up
# H Odds columns: impute and scale
horse_odds_labels = ['h0_odds','h1_odds', 'h2_odds', 'h3_odds', 'h4_odds', 'h5_odds', 'h6_odds', 'h7_odds', 'h8_odds', 'h9_odds', 'h10_odds', 'h11_odds', 'h12_odds', 'h13_odds', 'h14_odds', 'h15_odds', 'h16_odds', 'h17_odds','h18_odds']
dataset = impute_def_col(dataset, horse_odds_labels)
dataset = scale_def_col(dataset, horse_odds_labels)
# H Weights columns: impute and scale
horse_weight_labels = ['h0_weight','h1_weight', 'h2_weight', 'h3_weight', 'h4_weight', 'h5_weight', 'h6_weight', 'h7_weight', 'h8_weight', 'h9_weight', 'h10_weight', 'h11_weight', 'h12_weight', 'h13_weight', 'h14_weight', 'h15_weight', 'h16_weight', 'h17_weight','h18_weight']
dataset = impute_def_col(dataset, horse_weight_labels)
dataset = scale_def_col(dataset, horse_weight_labels)
# H Claim value columns: impute and scale
horse_claim_value_labels = ['h0_claim_value','h1_claim_value', 'h2_claim_value', 'h3_claim_value', 'h4_claim_value', 'h5_claim_value', 'h6_claim_value', 'h7_claim_value', 'h8_claim_value', 'h9_claim_value', 'h10_claim_value', 'h11_claim_value', 'h12_claim_value', 'h13_claim_value', 'h14_claim_value', 'h15_claim_value', 'h16_claim_value', 'h17_claim_value','h18_claim_value']
dataset = impute_def_col(dataset, horse_claim_value_labels)
dataset = scale_def_col(dataset, horse_claim_value_labels)
# R Purse value columns: impute and scale
dataset = impute_def_col(dataset, ['purse'])
dataset = scale_def_col(dataset, ['purse'])
# R Purse value columns: impute and scale
dataset = impute_def_col(dataset, ['distance'])
dataset = scale_def_col(dataset, ['distance'])
# R Class Rating value columns: impute and scale
dataset = impute_def_col(dataset, ['class_rating'])
dataset = scale_def_col(dataset, ['class_rating'])
# Data splitting
x = dataset.iloc[:,:-1]
y = dataset.iloc[:,-1]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# SVC
from sklearn.svm import SVC
svc = SVC()
svc.fit(x_train, y_train)
y_pred_svm = svc.predict(x_test)
# Validation
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred_svm)
# predicted 0 1 Total:
# real 0 [7590, 1240] 8830
# 1 [2715, 2187] 4902
# Predict NOT wps accuracy: 85.95%
# Predict wps accuracy: 44.61%
# Removing race_number from data
x_nrn = x.iloc[:,1:]
x_nrn_train, x_nrn_test, y_nrn_train, y_nrn_test = train_test_split(x_nrn, y, test_size=0.2)
    svc_nrn = SVC()
    svc_nrn.fit(x_nrn_train, y_nrn_train)
y_pred_nrn = svc_nrn.predict(x_nrn_test)
# predicted 0 1 Total:
# real 0 [7609, 1221] 8830
# 1 [2812, 2090] 4902
# Predict NOT wps accuracy: 86.17%
# Presict wps accuracy: 42.63%
#------------------------------------------------------------------------------
def impute_def_col(df, label):
for la in label:
imp = Imputer()
col = np.array(df[la]).T
try:
df[la] = imp.fit_transform(col.reshape(-1,1))
except:
pdb.set_trace()
return df
#------------------------------------------------------------------------------
def scale_def_col(df, label):
for la in label:
sc = StandardScaler()
col = np.array(df[la]).T
try:
df[la] = sc.fit_transform(col.reshape(-1,1))
except:
pdb.set_trace()
return df
#------------------------------------------------------------------------------
def ohe_col(df, label):
    for la in label:
        le = LabelEncoder()
        ohe = OneHotEncoder()
        col = np.array(df[la]).T
        try:
            col_le = le.fit_transform(col)
            col_ohe = ohe.fit_transform(col_le.reshape(-1, 1)).toarray()
            # append one indicator column per encoded category, drop the original
            ohe_frame = pd.DataFrame(col_ohe, index=df.index,
                                     columns=['{0}_{1}'.format(la, c) for c in le.classes_])
            df = pd.concat([df.drop(la, axis=1), ohe_frame], axis=1)
        except:
            pdb.set_trace()
    return df
#------------------------------------------------------------------------------
if __name__ == '__main__':
main() | gpl-3.0 |
vberthiaume/vblandr | src/silenceTest.py | 1 | 3381 | import subprocess as sp
import scikits.audiolab
import numpy as np
from scipy.fftpack import fft, ifft
from scipy.io import wavfile
import bisect
import matplotlib.pyplot as plt
import time
plt.rcParams['agg.path.chunksize'] = 10000
#--CONVERT MP3 TO WAV------------------------------------------
#song_path = '/home/gris/Music/vblandr/test_small/punk/07 Alkaline Trio - Only Love.mp3'
#song_path = '/mnt/c/Users/barth/Documents/vblandr/train_small/punk/01 - True North.mp3'
#song_path = '/mnt/c/Users/barth/Documents/vblandr/train_small/audiobook/Blaise_Pascal_-_Discours_sur_les_passions_de_l_amour.mp3'
song_path = '/home/gris/Music/vblandr/train_small/audiobook/Blaise_Pascal_-_Discours_sur_les_passions_de_l_amour.mp3'
command = [ 'ffmpeg',
'-i', song_path,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ar', '44100', # sms tools wavread can only read 44100 Hz
'-ac', '1', # mono file
'-loglevel', 'quiet',
'-'] #instead of having an output file, using '-' sends it in the pipe. not actually sure how this works.
#run the command
pipe = sp.Popen(command, stdout=sp.PIPE)
#read the output into a numpy array
stdoutdata = pipe.stdout.read()
audio_array = np.fromstring(stdoutdata, dtype=np.int16)
#--------------------------------------------------------------
def removeInitialSilence(cur_song_pcm):
#start_time = time.clock()
#raw
#min = np.min(cur_song_pcm)
#max = np.max(cur_song_pcm)
#print ("raw min:", min, "max:", max)
#using absolute value
env = abs(cur_song_pcm)
#min = np.min(env)
#max = np.max(env)
#print ("abs min:", min, "max:", max)
#float
env = env.astype(np.float32) #cast the array into float32
#min = np.min(env)
#max = np.max(env)
#print ("float min:", min, "max:", max)
#norm1
max = np.max(env)
    env = np.multiply(env, 1.0 / max) # normalise the absolute envelope into the [0, 1] range
min = np.min(env)
max = np.max(env)
print ("norm1 min:", min, "max:", max)
#end_time = time.clock()
#print ("time:", end_time - start_time)
plt.plot(env)
plt.show()
#convolving as a way to do a fast moving average
N = 100
env = np.convolve(env, np.ones((N,))/N)[(N-1):]
#first 44100 samples are silent. what is their max amplitude?
#print np.max(env[:44100])
#at 1.5s, we're clearly into audio, what is the max amplitude?
#print "before .5s, max: ", np.max(env[:.5*44100])
#here we're still in noise part
#print "in vocal part, max: ", np.max(env[.625*44100])
#detect first non-silent sample
threshold = .00004
endOfSilence = bisect.bisect(env,threshold)
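    # note: bisect performs a binary search and assumes a sorted sequence; the
    # smoothed envelope is not strictly sorted, so this threshold lookup is a
    # heuristic for the first sample that rises above the threshold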
    print("end of silence: " + str(endOfSilence))
#these don't work on hesse
plt.plot(env)
plt.show()
return cur_song_pcm[endOfSilence:]
#---- REMOVE SILENCE --------------------
ifft_output = removeInitialSilence(audio_array)
#truncate to 1 sec
ifft_output = ifft_output[:1*44100]
#--SAVE WAVE AS NEW FILE ----------------
ifft_output = np.round(ifft_output).astype('int16')
wavfile.write('/home/gris/Music/vblandr/silenceTest.wav', 44100, ifft_output)
#wavfile.write('/mnt/c/Users/barth/Documents/vblandr/silenceTest.wav', 44100, ifft_output)
| apache-2.0 |
joshloyal/scikit-learn | sklearn/linear_model/passive_aggressive.py | 28 | 11542 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None, average=False):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False,
average=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
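# Usage sketch (illustrative): both estimators follow the standard
# fit/predict estimator API, e.g.
#   clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", n_iter=5)
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)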
| bsd-3-clause |
QuLogic/burnman | misc/benchmarks/solidsolution_benchmarks.py | 1 | 6797 | # Benchmarks for the solid solution class
import os.path, sys
sys.path.insert(1,os.path.abspath('../..'))
import burnman
from burnman import minerals
from burnman.processchemistry import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
atomic_masses=read_masses()
'''
Solvus shapes (a proxy for Gibbs free energy checking)
'''
# van Laar parameter
# Figure 2a of Holland and Powell, 2003
# Temperature dependence
# Figure 2b of Holland and Powell, 2003
# A specific solvus example: sanidine-high albite
# Includes asymmetry and pressure, temperature dependence
# Figure 3 of Holland and Powell, 2003
'''
Excess properties
'''
# Configurational entropy
# Navrotsky and Kleppa, 1967
class o_d_spinel(burnman.SolidSolution):
def __init__(self):
# Name
        self.name='spinel'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.HP_2011_ds62.sp(), '[Mg][Al]2O4'],[minerals.HP_2011_ds62.sp(), '[Al][Mg1/2Al1/2]2O4']]
# Interaction parameters
enthalpy_interaction=[[0.0]]
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.SymmetricRegularSolution(endmembers, enthalpy_interaction) )
comp = np.linspace(0.001, 0.999, 100)
sp=o_d_spinel()
sp_entropies = np.empty_like(comp)
sp_entropies_NK1967= np.empty_like(comp)
for i,c in enumerate(comp):
molar_fractions=[1.0-c, c]
sp.set_composition( np.array(molar_fractions) )
sp.set_state( 1e5, 298.15 )
sp_entropies[i] = sp.solution_model._configurational_entropy( molar_fractions )
sp_entropies_NK1967[i] = -8.3145*(c*np.log(c) + (1.-c)*np.log(1.-c) + c*np.log(c/2.) + (2.-c)*np.log(1.-c/2.)) # eq. 7 in Navrotsky and Kleppa, 1967.
#fig1 = mpimg.imread('configurational_entropy.png') # Uncomment these two lines if you want to overlay the plot on a screengrab from SLB2011
#plt.imshow(fig1, extent=[0.0, 1.0,0.,17.0], aspect='auto')
plt.plot( comp, sp_entropies_NK1967, 'b-', linewidth=3.)
plt.plot( comp, sp_entropies, 'r--', linewidth=3.)
plt.xlim(0.0,1.0)
plt.ylim(0.,17.0)
plt.ylabel("Configurational entropy of solution (J/K/mol)")
plt.xlabel("fraction inverse spinel")
plt.show()
# Configurational entropy
# Figure 3b of Stixrude and Lithgow-Bertelloni, 2011
class orthopyroxene_red(burnman.SolidSolution):
def __init__(self):
# Name
self.name='orthopyroxene'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg][Si]SiO6'],[minerals.SLB_2011.mg_tschermaks(), 'Mg[Al][Al]SiO6'] ]
# Interaction parameters
enthalpy_interaction=[[0.0]]
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.SymmetricRegularSolution(endmembers, enthalpy_interaction) )
class orthopyroxene_blue(burnman.SolidSolution):
def __init__(self):
# Name
self.name='orthopyroxene'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg]Si2O6'],[minerals.SLB_2011.mg_tschermaks(), 'Mg[Al]AlSiO6'] ]
# Interaction parameters
enthalpy_interaction=[[0.0]]
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.SymmetricRegularSolution(endmembers, enthalpy_interaction) )
class orthopyroxene_long_dashed(burnman.SolidSolution):
def __init__(self):
# Name
self.name='orthopyroxene'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg]Si2O6'],[minerals.SLB_2011.mg_tschermaks(), '[Mg1/2Al1/2]2AlSiO6'] ]
# Interaction parameters
enthalpy_interaction=[[10.0e3]]
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.SymmetricRegularSolution(endmembers, enthalpy_interaction) )
class orthopyroxene_short_dashed(burnman.SolidSolution):
def __init__(self):
# Name
self.name='orthopyroxene'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg][Si]2O6'],[minerals.SLB_2011.mg_tschermaks(), 'Mg[Al][Al1/2Si1/2]2O6'] ]
# Interaction parameters
enthalpy_interaction=[[0.0]]
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.SymmetricRegularSolution(endmembers, enthalpy_interaction) )
comp = np.linspace(0, 1.0, 100)
opx_models=[orthopyroxene_red(), orthopyroxene_blue(), orthopyroxene_long_dashed(), orthopyroxene_short_dashed()]
opx_entropies = [ np.empty_like(comp) for model in opx_models ]
for idx, model in enumerate(opx_models):
for i,c in enumerate(comp):
molar_fractions=[1.0-c, c]
model.set_composition(np.array(molar_fractions))
model.set_state(0., 0.)
opx_entropies[idx][i] = model.solution_model._configurational_entropy(molar_fractions)
fig1 = mpimg.imread('configurational_entropy.png') # these two lines overlay the plot on a screengrab from SLB2011
plt.imshow(fig1, extent=[0.0, 1.0,0.,17.0], aspect='auto')
plt.plot( comp, opx_entropies[0], 'r--', linewidth=3.)
plt.plot( comp, opx_entropies[1], 'b--', linewidth=3.)
plt.plot( comp, opx_entropies[2], 'g--', linewidth=3.)
plt.plot( comp, opx_entropies[3], 'g-.', linewidth=3.)
plt.xlim(0.0,1.0)
plt.ylim(0.,17.0)
plt.ylabel("Configurational entropy of solution (J/K/mol)")
plt.xlabel("cats fraction")
plt.show()
# Excess volume of solution
# Excess enthalpy of solution
# Figure 5 of Stixrude and Lithgow-Bertelloni, 2011
class clinopyroxene(burnman.SolidSolution):
def __init__(self):
# Name
self.name='clinopyroxene'
# Endmembers (cpx is symmetric)
endmembers = [[minerals.SLB_2011.diopside(), '[Ca][Mg][Si]2O6'],[minerals.SLB_2011.ca_tschermaks(), '[Ca][Al][Si1/2Al1/2]2O6'] ]
# Interaction parameters
enthalpy_interaction=[[26.e3]]
alphas = np.array( [1.0, 3.5] )
burnman.SolidSolution.__init__(self, endmembers, \
burnman.solutionmodel.AsymmetricRegularSolution(endmembers, alphas, enthalpy_interaction) )
cpx = clinopyroxene()
comp = np.linspace(0, 1.0, 100)
gibbs = np.empty_like(comp)
for i,c in enumerate(comp):
cpx.set_composition( np.array([1.0-c, c]) )
cpx.set_state( 0., 0. )
gibbs[i] = cpx.excess_gibbs
fig1 = mpimg.imread('dicats.png') # these two lines overlay the plot on a screengrab from SLB2011
plt.imshow(fig1, extent=[0.0, 1.0,-2.,8.0], aspect='auto')
plt.plot( comp, gibbs/1000., 'b--', linewidth=3.)
plt.xlim(0.0,1.0)
plt.ylim(-2.,8.0)
plt.ylabel("Excess enthalpy of solution (kJ/mol)")
plt.xlabel("cats fraction")
plt.show()
| gpl-2.0 |
hdmetor/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    configuration : benchmark configuration dict (provides the estimator names and complexities used as labels)
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
bthirion/scikit-learn | sklearn/feature_extraction/hashing.py | 74 | 6153 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
madengr/usrp_rf_tests | apps/b200_two_tone_tx_test.py | 1 | 5934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 26 20:28:23 2014
USRP generates two tones with swept gain and frequency
Tone and IMD powers measured with spectrum analyzer
@author: madengr
"""
import instruments
import numpy as np
import time
from gnuradio import gr
from gnuradio import uhd
from gnuradio import analog
from gnuradio import blocks
import matplotlib.pyplot as plt
class MyTopBlock(gr.top_block):
""" Class for two tone output from USRP """
def __init__(self):
# Call the initialization method from the parent class
gr.top_block.__init__(self)
# Default constants
# Each tone amplitude is -9 dBFS, for a combined peak of -3 dBFS
uhd_args = "type=b200"
self.sample_rate = 250E3
self.center_freq = 3E9
self.gain = 60
self.sig0_freq = 50E3
self.sig1_freq = 75E3
self.sig_amp = 1/(2*np.sqrt(2))
# Setup the signal generator source
sig0 = analog.sig_source_c(self.sample_rate,
analog.GR_SIN_WAVE,
self.sig0_freq,
self.sig_amp)
sig1 = analog.sig_source_c(self.sample_rate,
analog.GR_SIN_WAVE,
self.sig1_freq,
self.sig_amp)
adder = blocks.add_cc()
# Setup the USRP sink, using self so USRP methods are accesible
self.usrp = uhd.usrp_sink(uhd_args, uhd.io_type_t.COMPLEX_FLOAT32, 1)
self.usrp.set_clock_source("external", 0)
self.usrp.set_samp_rate(self.sample_rate)
self.usrp.set_center_freq(self.center_freq)
self.usrp.set_gain(self.gain)
# Connect the source to sink
self.connect(sig0, (adder, 0))
self.connect(sig1, (adder, 1))
self.connect(adder, self.usrp)
def main():
""" Sweep through frequency and gain, and measure power """
# Create instances of the spectrum analyzer and GR flow
specan = instruments.SpecAn8566B(18)
tb = MyTopBlock()
# Start the flow and wait a little
tb.start()
time.sleep(1)
# Setup the spectrum analyzer
specan.preset()
specan.set_ref_level(30)
specan.set_span(tb.sample_rate)
specan.set_rbw(3000)
specan.set_vbw(1000)
# Create a list of center frequencies and append some more
center_freq_list = [100E6, 500E6, 1E9]
center_freq_start = 2E9
center_freq_stop = 6E9
center_freq_step = 2E9
center_freq = center_freq_start
while center_freq <= center_freq_stop:
center_freq_list.append(center_freq)
center_freq = center_freq + center_freq_step
# Create a list of gains
gain_start = 70
gain_stop = 89
gain_step = 1.0
gain_list = []
gain = gain_start
while gain <= gain_stop:
gain_list.append(gain)
gain = gain + gain_step
# Create arrays to hold the tone power and IMD3 level
tone_power = np.zeros((len(center_freq_list), len(gain_list)))
imd3_level = np.zeros((len(center_freq_list), len(gain_list)))
# Sweep through frequency and gain lists, and measure tone powers
for freq_index, center_freq in enumerate(center_freq_list):
tb.usrp.set_center_freq(center_freq)
specan.set_center_freq(center_freq)
print ""
print "Center Frequency = " + str(center_freq/1E6) + " MHz"
for gain_index, gain in enumerate(gain_list):
tb.usrp.set_gain(gain)
# Measure the sig0 tone
specan.set_mkr_freq(center_freq + tb.sig0_freq)
specan.sweep()
tone_power[freq_index][gain_index] = specan.get_mkr_amp()
# Measure the lower IMD3 product
specan.set_mkr_freq(center_freq + 2*tb.sig0_freq - tb.sig1_freq)
imd3_level[freq_index][gain_index] = specan.get_mkr_amp() - \
tone_power[freq_index][gain_index]
# Print the result
print "Gain = %1.1f dB, SIG0 Power = % 1.1f dBm, IMD3 = %1.1f dBc"\
% (gain, tone_power[freq_index][gain_index], \
imd3_level[freq_index][gain_index])
# Stop the flow and wait for it to finish
tb.stop()
tb.wait()
# Save the data to file
#numpy.savetxt('tone_power.txt', tone_power, fmt='%+2.2f')
#numpy.savetxt('imd3_level.txt', imd3_level, fmt='%+2.2f')
# Plot the single tone power
plt.figure(num=0, figsize=(8, 6), dpi=150)
for freq_index, center_freq in enumerate(center_freq_list):
freq_label = str(int(center_freq/1E6)) + " MHz"
plt.plot(gain_list[:], tone_power[freq_index, :], label=str(freq_label))
plt.legend(loc='upper left')
plt.xlabel('Gain (dB)')
plt.ylabel('Single Tone Power (dBm)')
plt.grid()
plt.title("B200 Two Tone TX Test over Frequency and Gain")
plt.suptitle("%1.3f Tone Amplitudes @ %i kHz Offset @ %i kHz Spacing" % \
(tb.sig_amp, (tb.sig1_freq + tb.sig0_freq)/2E3, \
(tb.sig1_freq - tb.sig0_freq)/1E3))
plt.savefig('usrp_two_tone_tx_power_graph.png')
plt.show()
# Plot the IMD3 level
plt.figure(num=1, figsize=(8, 6), dpi=150)
for freq_index, center_freq in enumerate(center_freq_list):
freq_label = str(int(center_freq/1E6)) + " MHz"
plt.plot(gain_list[:], imd3_level[freq_index, :], label=str(freq_label))
plt.legend(loc='upper left')
plt.xlabel('Gain (dB)')
plt.ylabel('IMD3 (dBc)')
plt.grid()
plt.title("B200 Two Tone TX Test over Frequency and Gain")
plt.suptitle("%1.3f Tone Amplitudes @ %i kHz Offset @ %i kHz Spacing" % \
(tb.sig_amp, (tb.sig1_freq + tb.sig0_freq)/2E3, \
(tb.sig1_freq - tb.sig0_freq)/1E3))
plt.savefig('b200_two_tone_tx_imd3_graph.png')
plt.show()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
libAtoms/matscipy | examples/electrochemistry/samples_pb_c2d.py | 1 | 63342 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Poisson-Boltzmann distribution & continuous2discrete
#
# *Johannes Hörmann, Lukas Elflein, 2019*
#
# from continuous electrochemical double layer theory to discrete coordinate sets
# %%
# for dynamic module reload during testing, code modifications take immediate effect
# %load_ext autoreload
# %autoreload 2
# %%
# stretching notebook width across whole window
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# %%
# basics
import logging
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
# %%
# sampling
from scipy import interpolate
from matscipy.electrochemistry import continuous2discrete
from matscipy.electrochemistry import get_histogram
from matscipy.electrochemistry.utility import plot_dist
# %%
# electrochemistry basics
from matscipy.electrochemistry import debye, ionic_strength
# %%
# Poisson-Bolzmann distribution
from matscipy.electrochemistry.poisson_boltzmann_distribution import gamma, potential, concentration, charge_density
# %%
# Poisson-Nernst-Planck solver
from matscipy.electrochemistry import PoissonNernstPlanckSystem
# %%
# 3rd party file output
import ase
import ase.io
# %%
# PoissonNernstPlanckSystem makes extensive use of Python's logging module
# configure logging: verbosity level and format as desired
standard_loglevel = logging.INFO
# standard_logformat = ''.join(("%(asctime)s",
# "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"))
standard_logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
# reset logger if previously loaded
logging.shutdown()
logging.basicConfig(level=standard_loglevel,
format=standard_logformat,
datefmt='%m-%d %H:%M')
# in Jupyter notebooks, explicitly modifying the root logger necessary
logger = logging.getLogger()
logger.setLevel(standard_loglevel)
# remove all handlers
for h in logger.handlers: logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(standard_logformat)
ch.setFormatter(formatter)
ch.setLevel(standard_loglevel)
logger.addHandler(ch)
# %%
# Test 1
logging.info("Root logger")
# %%
# Test 2
logger.info("Root Logger")
# %%
# Debug Test
logging.debug("Root logger")
# %% [markdown]
# # The Poisson-Boltzmann Distribution
# *Lukas Elflein, 2019*
#
# In order to understand lubrication better, we simulate thin layers of lubricant on a metallic surface, solvated in water.
# Different structures of lubricant films are created by varying parameters like their concentration and the charge of the surface.
# The lubricant is somewhat soluble in water, thus parts of the film will diffuse into the bulk water.
# Lubricant molecules are charged, and their distribution is roughly exponential.
#
# As a simplification, we first create a solution of ions (Na+, purple; Cl-, green) in water (not shown).
# ![pic](https://i.ibb.co/Yh8DxVM/showpicture.png)
#
# Close to the positively charged metallic surface, the electric potential (red) is highest and falls off exponentially with distance.
# This potential attracts negatively charged chloride ions and pushes positively charged sodium ions away, resulting in a higher (lower) concentration of chloride (sodium) near the surface.
#
#
# %% [markdown]
# To calculate this, we first need to find out how ions are distributed in solution.
# A good description of the concentrations of our ion species, $c_{\mathrm{Na}^+}$ and $c_{\mathrm{Cl}^-}$ or $c_i$ for $i \in \{\mathrm{Na}^+, \mathrm{Cl}^-\}$, is given by the solution to the Poisson-Boltzmann equation, here expressed with molar concentrations, Faraday constant and molar gas constant
#
# $
# \begin{align}
# c_i(x) &= c_i^\infty e^{-F \phi(x)/R T}\\
# \phi(x) &= \frac{2 R T}{F} \log\left(\frac{1 + \gamma e^{-\kappa x}}{1- \gamma e^{-\kappa z}}\right)
# \approx \frac{4 R T}{F} \gamma e^{-\kappa x} \\
# \gamma &= \tanh(\frac{F \phi_0}{4 R T})\\
# \kappa &= 1/\lambda_D\\
# \lambda_D &= \Big(\frac{\epsilon \epsilon_0 R T}{F^2 \sum_{i} c_i^\infty z_i^2} \Big)^\frac{1}{2}
# \end{align}
# $
#
# or alternatively expressed with number concentrations, elementary charge and Boltzmann constant instead
#
# $
# \begin{align}
# \rho_{i}(x) &= \rho_{i}^\infty e^{ -e \phi(z) \> / \> k_B T}\\
# \phi(x) &= \frac{2k_B T}{e} \> \log\left(\frac{1 + \gamma e^{-\kappa z}}{1- \gamma e^{-\kappa z}}\right)
# \approx \frac{4k_B T}{e} \gamma e^{-\kappa x} \\
# \gamma &= \tanh\left(\frac{e\phi_0}{4k_B T}\right)\\
# \kappa &= 1/\lambda_D\\
# \lambda_D &= \left(\frac{\epsilon \epsilon_0 k_B T}{\sum_{i} \rho_i^\infty e^2 z_i^2} \right)^\frac{1}{2}
# \end{align}
# $
#
# with
# * $x$: distance from interface $[\mathrm{m}]$
# * $\phi_0$: potential at the surface $[\mathrm{V}]$
# * $\phi(z)$: potential in the solution $[\mathrm{V}]$
# * $k_B$: Boltzmann Constant $[\mathrm{J}\> \mathrm{K}^{-1}]$
# * $R$: molar gas constant $[\mathrm{J}\> \mathrm{mol}^{-1}\> \mathrm{K}^{-1}]$
# * $T$: temperature $[\mathrm{K}]$
# * $e$: elementary charge (or Euler's number when exponentiated) $[\mathrm{C}]$
# * $F$: Faraday constant $[\mathrm{C}\> \mathrm{mol}^{-1}]$
# * $\gamma$: term from Gouy-Chapman theory
# * $\gamma \rightarrow 1$ for high potentials
# * $\phi(z) \approx \phi_0 e^{-\kappa z}$ for low potentials $\phi_0 \rightarrow 0$
# * $\lambda_D$: Debye length ($\approx 30.5\>\mathrm{nm}$ for NaCl, $10^{-4} \mathrm{M}$, $25^\circ \mathrm{C}$)
# * $c_{i}$: molar concentration of ion species $i$ $[\mathrm{mol}\> \mathrm{m}^{-3}]$
# * $c_{i}^\infty$: bulk molar concentration (at infinity, where the solution is homogeneous) $[\mathrm{mol}\> \mathrm{m}^{-3}]$
# * $\rho_{i}$: number concentration of ion species $i$ $[\mathrm{m}^{-3}]$
# * $\rho_{i}^\infty$: bulk number concentration $[\mathrm{m}^{-3}]$
# * $\epsilon$: relative permittivity of the solution $[1]$
# * $\epsilon_0$: vacuum permittivity $[\mathrm{F}\> \mathrm{m}^{-1}]$
# * $z_i$: number charge of species $i$ $[1]$
#
#
# These equations are implemented in `poisson_boltzmann_distribution.py`
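# %% [markdown]
# As a cross-check of the formulas above, the next cell is a minimal, self-contained sketch of the Debye length
# written directly against `scipy.constants`. The name `debye_length_sketch` is made up for this illustration and
# is *not* the matscipy implementation imported as `debye` above; assuming a relative permittivity of 79 for water
# (the default quoted for the PNP solver later in this notebook), both should agree for 0.1 mM NaCl.
# %%
def debye_length_sketch(c_mM, z, T=298.15, relative_permittivity=79):
    """Debye length (m) for molar concentrations c_mM (mM = mol/m^3) and number charges z."""
    c = np.asarray(c_mM)          # mM equals mol/m^3, so no unit conversion is needed
    z = np.asarray(z)
    sum_cz2 = np.sum(c * z**2)    # sum_i c_i z_i^2, i.e. twice the ionic strength
    eps = relative_permittivity * sc.epsilon_0
    return np.sqrt(eps * sc.gas_constant * T
                   / (sc.value('Faraday constant')**2 * sum_cz2))

debye_length_sketch([0.1, 0.1], [1, -1])  # expected close to 3.05e-8 m (about 30.5 nm)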
# %%
# Notes on units
# universal gas constant R = N_A * k_B, [R] = J mol^-1 K^-1
# Faraday constant F = N_a e, [F] = C mol^-1
print("Note on constants and units:")
print("[F] = {}".format(sc.unit('Faraday constant')))
print("[R] = {}".format(sc.unit('molar gas constant')))
print("[e] = {}".format(sc.unit('elementary charge')))
print("[k_B] = {}".format(sc.unit('Boltzmann constant')))
print("F/R = {}".format(sc.value('Faraday constant')/sc.value('molar gas constant')))
print("e/k_B = {}".format(sc.value('elementary charge')/sc.value('Boltzmann constant')))
print("F/R = e/k_B !")
# %%
# Debye length of 0.1 mM NaCl aqueous solution
c = [0.1,0.1] # mM
z = [1,-1]
deb = debye(c,z)
print('Debye Length of 10^-4 M saltwater: {} nm (Target: 30.52 nm)'.format(round(deb/sc.nano, 2)))
# %%
C = np.logspace(-3, 3, 50) # mM,
# NaCl molar mass 58.443 g/mol and solubility limit in water at about 360 g/L
# means concentrations as high as a few M (mol/L), i.e. >> 1000 mM, are possible
debyes = np.array([debye([c,c], [1,-1]) for c in C])
fig, (ax1,ax2) = plt.subplots(
nrows=1, ncols=2, figsize=[12,4], constrained_layout=True)
ax1.set_xlabel('concentration (mM)') # mM is mol / m^3
ax1.set_ylabel('Debye length at 25° [nm]')
ax1.semilogx(C, debyes/sc.nano, marker='.')
ax2.set_xlabel('concentration (mM)') # mM is mol / m^3
ax2.set_ylabel('Debye length at 25° [nm]')
ax2.loglog(C, debyes/sc.nano, marker='.')
plt.show()
# %% [markdown]
# The Debye length depends on the concentration of ions in solution; at low concentrations it becomes large. We can reproduce literature Debye lengths with our function, so everything looks good.
#
# ## Gamma Function
#
# Next we calculate the gamma function $\gamma = \tanh(\frac{e\Psi(0)}{4k_B T})$
# %%
x = np.linspace(-0.5, 0.5, 40)
gammas = gamma(x, 298.15)
plt.xlabel('Potential $\phi$ (V)')
plt.ylabel('$\gamma(\phi)$ at 298.15 K (1)')
plt.plot(x, gammas, marker='o')
plt.show()
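# %% [markdown]
# Cross-check (sketch only): if the imported `gamma` helper implements the formula above literally,
# the same curve follows in one line from `scipy.constants`.
# %%
gammas_direct = np.tanh(sc.elementary_charge * x / (4 * sc.Boltzmann * 298.15))
np.allclose(gammas, gammas_direct)  # expected True if gamma() evaluates tanh(e*phi/(4*k_B*T))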
# %% [markdown]
# ## Potential
#
# We plug these two functions into the expression for the potential
#
# $\phi(z) = \frac{2k_B T}{e} \log\Big(\frac{1 + \gamma e^{-\kappa z}}{1- \gamma e^{-\kappa z}}\Big)
# \approx \frac{4k_B T}{e} \gamma e^{-\kappa z}$
# %%
x = np.linspace(0, 2*10**-7, 10000) # 200 nm
c = [0.1,0.1]
z = [1,-1]
psi = potential(x, c, z, u=0.05)
plt.xlabel('x (nm)')
plt.ylabel('Potential (V)')
plt.plot(x/sc.nano, psi, marker='')
plt.show()
# %% [markdown]
# The potential is smooth and looks roughly exponential. Everything good so far.
#
# ## Concentrations
#
# Now we obtain ion concentrations $c_i$ from the potential $\phi(x)$ via
#
# $c_{i}(x) = c_{i}^\infty e^{-F \phi(x) \> / \> R T}$
# %%
x = np.linspace(0, 100*10**-9, 2000)
c = [0.1,0.1]
z = [1,-1]
u = 0.05
phi = potential(x, c, z, u)
C = concentration(x, c, z, u)
rho = charge_density(x, c, z, u)
# %%
# potential and concentration distributions analytic solution
# based on Poisson-Boltzmann equation for 0.1 mM NaCl aqueous solution
# at interface
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
deb = debye(c, z)
fig, ax1 = plt.subplots(figsize=[18,5])
ax1.set_xlabel('x (nm)')
ax1.plot(x/sc.nano, phi, marker='', color='red', label='Potential', linewidth=1, linestyle='--')
ax1.set_ylabel('potential (V)')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='orange')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='Bulk concentration of Na+ ions', color='grey', linewidth=1, linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='green', label='Na+ ions')
ax2.plot(x/sc.nano, C[1], marker='', color='blue', label='Cl- ions')
ax2.set_ylabel('concentration (mM)')
ax3 = ax1.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density', color='grey', linewidth=1, linestyle='--')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
#fig.legend(loc='center')
ax2.legend(loc='upper right', bbox_to_anchor=(-0.1, 1.02),fontsize=15)
ax1.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1, -0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# Potential and concentrations behave as expected.
#
# ## Sampling
# First, convert the physical concentration distributions into a callable "probability density":
# %%
distributions = [interpolate.interp1d(x,c) for c in C]
# %% [markdown]
# Normalization is not necessary here. Now we can sample the distribution of our $Na^+$ ions in z-direction.
# %%
x = y = 50e-9
z = 100e-9
box = np.array([x, y, z])
sample_size = 1000
# %%
from scipy import optimize
# %%
na_coordinate_sample = continuous2discrete(
distribution=distributions[0], box=box, count=sample_size)
histx, histy, histz = get_histogram(na_coordinate_sample, box=box, n_bins=51)
plot_dist(histz, 'Distribution of Na+ ions in z-direction', reference_distribution=distributions[0])
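# %% [markdown]
# As an aside, the z-profile produced by `continuous2discrete` can be cross-checked with plain
# inverse-transform sampling of the interpolated concentration profile. The cell below is only an
# illustrative sketch of that idea (the helper `sample_z_inverse_cdf` is made up here); it is not
# how matscipy implements the sampling.
# %%
def sample_z_inverse_cdf(profile, z_max, count, n_support=1000):
    """Draw `count` z coordinates from an (unnormalized) 1D concentration profile."""
    z_support = np.linspace(0, z_max, n_support)
    weights = profile(z_support)          # unnormalized density on the support grid
    cdf = np.cumsum(weights)
    cdf /= cdf[-1]                        # normalize to a proper CDF
    u = np.random.uniform(size=count)     # uniform variates in [0, 1)
    return np.interp(u, cdf, z_support)   # invert the CDF by linear interpolation

z_sample = sample_z_inverse_cdf(distributions[0], box[2], sample_size)
plt.hist(z_sample/sc.nano, bins=51)
plt.xlabel('z (nm)')
plt.ylabel('counts')
plt.show()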
# %%
cl_coordinate_sample = continuous2discrete(
distributions[1], box=box, count=sample_size)
histx, histy, histz = get_histogram(cl_coordinate_sample, box=box, n_bins=51)
plot_dist(histx, 'Distribution of Cl- ions in x-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box[0])
plot_dist(histy, 'Distribution of Cl- ions in y-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box[1])
plot_dist(histz, 'Distribution of Cl- ions in z-direction', reference_distribution=distributions[1])
# %% [markdown]
# ## Write to file
# To visualize our sampled coordinates, we utilize ASE to export it to some standard format, i.e. .xyz or LAMMPS data file.
# ASE speaks Ångström per default, thus we convert SI units:
# %%
na_atoms = ase.Atoms(
symbols='Na'*sample_size,
charges=[1]*sample_size,
positions=na_coordinate_sample/sc.angstrom,
cell=box/sc.angstrom,
pbc=[1,1,0])
cl_atoms = ase.Atoms(
symbols='Cl'*sample_size,
charges=[-1]*sample_size,
positions=cl_coordinate_sample/sc.angstrom,
cell=box/sc.angstrom,
pbc=[1,1,0])
system = na_atoms + cl_atoms
system
ase.io.write('NaCl_0.1mM_0.05V_50x50x100nm_at_interface_poisson_boltzmann_distributed.xyz',system,format='xyz')
# %%
# LAMMPS data format, units 'real', atom style 'full'
# before ASE 3.19.0b1, ASE had issues with exporting atom style 'full' in LAMMPS data file format, so do not expect this line to work for older ASE versions
ase.io.write('NaCl_0.1mM_0.05V_50x50x100nm_at_interface_poisson_boltzmann_distributed.lammps',system,format='lammps-data',units="real",atom_style='full')
# %% [markdown]
# # General Poisson-Nernst-Planck System
# %% [markdown]
# For general systems, i.e. a nanogap between two electrodes with not necessarily binary electrolyte, no closed analytic solution exists.
# Thus, we solve the full Poisson-Nernst-Planck system of equations.
# %% [markdown]
# A binary Poisson-Nernst-Planck system corresponds to the transport problem in semiconductor physics.
# In this context, Debye length, charge carrier densities and potential are related as follows.
# %% [markdown]
# ## Excursus: Transport problem in the PNP junction
# %% [markdown]
# ### Debye length
# %% [markdown]
# Where does the Debye length
#
# $$ \lambda = \sqrt{ \frac{\varepsilon \varepsilon_0 k_B T}{q^2 n_i} }$$
#
# come from as the natural length unit of the transport problem?
#
# Here $n_i$ is a reference charge carrier density, usually the intrinsic carrier density.
# In the example of an $N^+NN^+$-doped semiconductor, different doping at the boundaries produces the elevated donor density $N_D^+ = 10^{20} \mathrm{cm}^{-3}$, while the middle region has the "standard" donor density $N_D = 10^{18} \mathrm{cm}^{-3}$. We can now choose $n_i = N_D$ as the reference and express the donor densities as $N_D = 1 \cdot n_i$ and $N_D^+ = 100 \cdot n_i$. We simply call this normalized concentration $\tilde{N}_D$: $N_D = \tilde{N}_D \cdot n_i$.
#
# An ionized donor carries the charge $q$; a charge carrier (in our case an electron) carries the elementary charge $-q$. The space charge density $\rho$ in the Poisson equation
#
# $$ \nabla^2 \varphi = - \frac{\rho}{\varepsilon \varepsilon_0}$$
#
# can therefore simply be written as $\rho = - (n - N_D) \cdot q = - (\tilde{n} - \tilde{N}_D) ~ n_i ~ q$.
#
# Conventionally, the potential is normalized to $u = \frac{\phi ~ q}{k_B ~ T}$. The Poisson equation then takes the form
#
# $$\frac{k_B ~ T}{q} \cdot \nabla^2 u = \frac{(\tilde{n} - \tilde{N}_D) ~ n_i ~ q }{\varepsilon \varepsilon_0}$$
#
# or, equivalently,
#
# $$ \frac{\varepsilon ~ \varepsilon_0 ~ k_B ~ T}{q^2 n_i} \cdot \nabla^2 u = \lambda^2 \cdot \nabla^2 u = \tilde{n} - \tilde{N}_D$$
#
#
# %% [markdown]
# ### Dimensionless formulation
# %% [markdown]
# Poisson and drift-diffusion equations
#
# $$
# \lambda^2 \frac{\partial^2 u}{\partial x^2} = n - N_D
# $$
#
# $$
# \frac{\partial n}{\partial t} = - D_n \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + R
# $$
#
# Scaled with the length and time units [l], [t]:
#
# $$
# \frac{\lambda^2}{[l]^2} \frac{\partial^2 u}{\partial \tilde{x}^2} = n - N
# $$
#
# and
#
# $$
# \frac{1}{[t]} \frac{\partial n}{\partial \tilde{t}} = - \frac{D_n}{[l]^2} \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + R
# $$
#
# or
#
# $$
# \frac{\partial n}{\partial \tilde{t}} = - \tilde{D}_n \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + \tilde{R}
# $$
#
# with
#
# $$
# \tilde{D}_n = D_n \frac{[t]}{[l]^2} \Leftrightarrow [t] = [l]^2 \ \frac{ \tilde{D}_n } { D_n }
# $$
#
# and
#
# $$ \tilde{R} = \frac{n - N_D}{\tilde{\tau}}$$
#
# with $\tilde{\tau} = \tau / [t]$.
#
# $\tilde{\lambda} = 1$ and $\tilde{D}_n = 1$ are obtained with
# $[l] = \lambda$ and $[t] = \frac{\lambda^2}{D_n}$:
# %% [markdown]
# ### Discretization
# %% [markdown]
# Naive discretization (scaled):
#
# $$ \frac{1}{\Delta x^2} ( u_{i+1}-2u_i+u_{i-1} ) = n_i - N_i $$
#
# $$ \frac{1}{\Delta t} ( n_{i,j+1} - n_{i,j} ) = - \frac{1}{\Delta x^2} \cdot \left[ \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) \right] + \frac{ n_i - N_i}{ \tilde{\tau} } $$
#
# Stationary case:
#
# $$
# u_{i+1}-2u_i+u_{i-1} - \Delta x^2 \cdot n_i + \Delta x^2 \cdot N_i = 0
# $$
#
# and
#
# $$
# \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) - \Delta x^2 \cdot \frac{ n_i - N_i}{ \tilde{\tau} } = 0
# $$
# %% [markdown]
# ### Newton iteration for the coupled non-linear system of equations
# %% [markdown]
# Idea: solve the non-linear finite-difference system of equations with Newton's method,
#
# $$ \vec{F}(\vec{x}_{k+1}) = F(\vec{x}_k + \Delta \vec{x}_k) \approx F(\vec{x}_k) + \mathbf{J_F}(\vec{x}_k) \cdot \Delta \vec{x}_k + \mathcal{O}(\Delta x^2)$$
#
# with the unknowns $\vec{x_k} = \{u_1^k, \dots, u_N^k, n_1^k, \dots, n_N^k\}$, and hence
#
# $$ \Rightarrow \Delta \vec{x}_k = - \mathbf{J}_F^{-1} ~ F(\vec{x}_k)$$
#
# where the $2N \times 2N$ Jacobian matrix has the entries
#
# $$ \mathbf{J}_{ij}(\vec{x}_k) = \frac{\partial F_i}{\partial x_j} (\vec{x}_k) $$
#
# which are evaluated at $\vec{x}_k$ in every iteration step.
# The actual effort lies in inverting the Jacobian matrix in order to find the correction step $\Delta \vec{x}_k$ in each iteration $k$.
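# %% [markdown]
# For illustration only: a bare-bones, undamped Newton iteration of the kind sketched above, using a
# forward-difference Jacobian. All names in this cell (`numerical_jacobian_sketch`, `newton_sketch`)
# are made up for the illustration; the actual solver used below is part of `PoissonNernstPlanckSystem`
# and is considerably more careful.
# %%
def numerical_jacobian_sketch(F, x, eps=1e-8):
    """Forward-difference Jacobian of a vector-valued residual F at x."""
    f0 = np.asarray(F(x))
    J = np.zeros((f0.size, x.size))
    for j in range(x.size):
        dx = np.zeros_like(x)
        dx[j] = eps
        J[:, j] = (np.asarray(F(x + dx)) - f0) / eps
    return J

def newton_sketch(F, x0, maxit=20, tol=1e-10):
    """Plain Newton iteration for F(x) = 0: solve J(x_k) dx_k = -F(x_k), then x_{k+1} = x_k + dx_k."""
    x = np.asarray(x0, dtype=float)
    for _ in range(maxit):
        f = np.asarray(F(x))
        if np.linalg.norm(f, np.inf) < tol:
            break
        J = numerical_jacobian_sketch(F, x)
        x = x + np.linalg.solve(J, -f)    # correction step dx_k
    return x

# tiny 2x2 demo: intersection of the unit circle with the line x0 = x1
newton_sketch(lambda v: np.array([v[0]**2 + v[1]**2 - 1.0, v[0] - v[1]]), np.array([1.0, 0.5]))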
# %% [markdown]
# $F(x)$ is defined as follows:
#
# $$
# u_{i+1}-2u_i+u_{i-1} - \Delta x^2 \cdot n_i + \Delta x^2 \cdot N_i = 0
# $$
#
# and
#
# $$
# \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) - \Delta x^2 \cdot \frac{ n_i - N_i}{ \tilde{\tau} } = 0
# $$
# %% [markdown]
# ### Controlled-Volume
# %% [markdown]
# We express the non-linear part of the transport equation (more precisely, of the flux) via the Bernoulli function
#
# $$ B(x) = \frac{x}{\exp(x)-1} $$
#
# (see lecture notes). To stay out of numerical trouble in the vicinity of 0, we prefer the Taylor expansion there. The literature (Selberherr, S., Analysis and Simulation of Semiconductor Devices, Springer 1984) recommends an even more elaborate piecewise definition, but we will see below that this approach is sufficient for the stationary problem at hand.
#
# %% [markdown]
# ## Implementation for Poisson-Nernst-Planck system
# %% [markdown]
# Poisson-Nernst-Planck system for $k = {1 \dots M}$ ion species in dimensionless formulation
#
# $$ \nabla^2 u + \rho(n_{1},\dots,n_{M}) = 0 $$
#
# $$ \nabla^2 n_k + \nabla ( z_k n_k \nabla u ) = 0 \quad \text{for} \quad k = 1 \dots M $$
#
# yields a naive finite difference discretization on $i = {1 \dots N}$ grid points for $k = {1 \dots M}$ ion species
#
# $$ \frac{1}{\Delta x^2} ( u_{i+1}-2u_i+u_{i-1} ) + \frac{1}{2} \sum_{k=1}^M z_k n_{i,k} = 0 $$
#
# $$ - \frac{1}{\Delta x^2} \cdot \left[ \frac{1}{4} z_k (n_{i+1,k} - n_{i-1,k}) (u_{i+1} - u_{i-1}) + z_k n_{i,k} ( u_{i+1} - 2 u_i + u_{i-1} ) + ( n_{i+1,k} - 2 n_{i,k} + n_{i-1,k} ) \right] = 0 $$
#
# or rearranged
#
# $$ u_{i+1}-2 u_i+u_{i-1} + \Delta x^2 \frac{1}{2} \sum_{k=1}^M z_k n_{i,k} = 0 $$
#
# and
#
# $$
# \frac{1}{4} z_k (n_{i+1,k} - n_{i-1,k}) (u_{i+1} - u_{i-1}) + z_k n_{i,k} ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1,k} - 2 n_{i,k} + n_{i-1,k} ) = 0
# $$
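# %% [markdown]
# Illustrative only: the discrete Poisson residual above, written out with numpy for the interior
# grid points. The function name and argument layout are ad hoc for this sketch and do not correspond
# to the solver class used below.
# %%
def poisson_residual_sketch(u, n, z, dx):
    """Residual u_{i+1} - 2 u_i + u_{i-1} + dx^2/2 * sum_k z_k n_{i,k} at interior grid points.

    u  : dimensionless potential on the grid, shape (N,)
    n  : dimensionless concentrations, shape (M, N) for M ion species
    z  : number charges, shape (M,)
    dx : dimensionless grid spacing
    """
    z = np.asarray(z)[:, np.newaxis]
    rho = 0.5 * np.sum(z * np.asarray(n), axis=0)   # 1/2 sum_k z_k n_{i,k}
    return u[2:] - 2*u[1:-1] + u[:-2] + dx**2 * rho[1:-1]

# smoke test: a linear potential and a locally electroneutral 1:1 electrolyte give zero residual
poisson_residual_sketch(np.linspace(0.1, 0.0, 5), np.ones((2, 5)), [1, -1], dx=0.25)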
# %% [markdown]
# ### Controlled Volumes, 1D
# %% [markdown]
# Finite differences do not converge for our non-linear systems. Instead, we express the non-linear part of the Nernst-Planck equations with the Bernoulli function (Selberherr, S., Analysis and Simulation of Semiconductor Devices, Springer 1984)
#
# $$ B(x) = \frac{x}{\exp(x)-1} $$
# %%
def B(x):
return np.where( np.abs(x) < 1e-9,
1 - x/2 + x**2/12 - x**4/720, # Taylor
x / ( np.exp(x) - 1 ) )
# %%
xB = np.arange(-10,10,0.1)
# %%
plt.plot( xB ,B( xB ), label="$B(x)$")
plt.plot( xB, - B(-xB), label="$-B(-x)$")
plt.plot( xB, B(xB)-B(-xB), label="$B(x)-B(-x)$")
plt.legend()
# %% [markdown]
# Looking at the (dimensionless) flux $j_k$ through segment $k$ between grid points $i$ and $j$,
#
# $$ j_k = - \frac{dn}{dx} - z n \frac{du}{dx} $$
#
# for an ion species with number charge $z$ and (dimensionless) concentration $n$,
# we assume (dimensionless) potential $u$ to behave linearly within this segment. The linear expression
#
# $$ u = \frac{u_j - u_i}{L_k} \cdot \xi_k + u_i = a_k \xi_k + u_i $$
#
# with the segment's length $L_k = \Delta x$ for uniform discretization, $\xi_k = x - x_i$ and proportionality factor $a_k = \frac{u_j - u_i}{L_k}$ leads to a flux
#
# $$ j_k = - \frac{dn}{d\xi} - z a_k n $$
#
# solvable for $n$ via
#
# $$ \frac{dn}{d\xi} = - z a_k n - j_k $$
#
# or
#
# $$ \frac{dn}{z a_k n + j_k} = - d\xi \text{.} $$
#
# We integrate from grid point $i$ to $j$
#
# $$ \int_{n_i}^{n_j} \frac{1}{z a_k n + j_k} dn = - L_k $$
#
# and find
#
# $$ \frac{1}{(z a_k)} \left[ \ln(j_k + z a_k n) \right]_{n_i}^{n_j} = - L_k $$
#
# or
#
# $$ \ln(j_k + z a_k n_j) - \ln(j_k + z a_k n_i) = - z a_k L_k $$
#
# which we solve for $j_k$ by rearranging
#
# $$ \frac{j_k + z a_k n_j}{j_k + z a_k n_i} = e^{- z a_k L_k} $$
#
# $$ j_k + z a_k n_j = (j_k + z a_k n_i) e^{- z a_k L_k} $$
#
# $$ j_k ( 1 - e^{- z a_k L_k} ) = - z a_k n_j + z a_k n_i e^{- z a_k L_k} $$
#
# $$j_k = \frac{z a_k n_j}{e^{- z a_k L_k} - 1} + \frac{ z a_k n_i e^{- z a_k L_k}}{ 1 - e^{- z a_k L_k}}$$
#
# $$j_k = \frac{1}{L_k} \cdot \left[ \frac{z a_k L_k n_j}{e^{- z a_k L_k} - 1} + \frac{ z a_k L_k n_i }{ e^{z a_k L_k} - 1} \right] $$
#
# or with $B(x) = \frac{x}{e^x-1}$ expressed as
#
# $$j_k = \frac{1}{L_k} \cdot \left[ - n_j B( - z a_k L_k ) + n_i B( z a_k L_k) \right] $$
#
# and resubstituting $a_k = \frac{u_j - u_i}{L_k}$ as
#
# $$j_k = - \frac{1}{L_k} \cdot \left[ n_j B( z [u_i - u_j] ) - n_i B( z [u_j - u_i] ) \right] \ \text{.}$$
#
# When employing our 1D uniform grid with $j_k = j_{k-1}$ for all $k = 1 \dots N$,
#
# $$ j_k \Delta x = n_{i+1} B( z [u_i - u_{i+1}] ) - n_i B( z [u_{i+1} - u_i] ) $$
#
# and
#
# $$ j_{k-1} \Delta x = n_i B( z [u_{i-1} - u_i] ) - n_{i-1} B( z [u_i - u_{i-1}] ) $$
#
# require
#
# $$ n_{i+1} B( z [u_i - u_{i+1}] ) - n_i \left( B( z [u_{i+1} - u_i] ) + B( z [u_{i-1} - u_i] ) \right) + n_{i-1} B( z [u_i - u_{i-1}] ) = 0 $$
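# %% [markdown]
# A small numerical sanity check of the controlled-volume flux derived above, reusing the `B(x)` defined
# earlier: for a vanishing potential difference the Scharfetter-Gummel-type flux must reduce to the plain
# diffusive flux $-(n_j - n_i)/L_k$. The helper `cv_flux_sketch` is made up for this illustration; the
# production scheme lives in `PoissonNernstPlanckSystem`.
# %%
def cv_flux_sketch(n_i, n_j, u_i, u_j, z, L_k):
    """Controlled-volume flux j_k = -[ n_j B(z(u_i-u_j)) - n_i B(z(u_j-u_i)) ] / L_k."""
    return -(n_j * B(z*(u_i - u_j)) - n_i * B(z*(u_j - u_i))) / L_k

# no potential drop: recovers the central-difference diffusive flux -(n_j - n_i)/L_k
# (np.where in B evaluates both branches, so numpy may emit a harmless warning at exactly 0)
print(cv_flux_sketch(1.0, 0.5, 0.0, 0.0, 1, 0.1), -(0.5 - 1.0)/0.1)
# with a potential drop, the drift contribution enters through the Bernoulli weights
print(cv_flux_sketch(1.0, 0.5, 0.0, -0.5, 1, 0.1))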
# %% [markdown]
# ## Test case 1: PNP interface system, 0.1 mM NaCl, positive potential u = 0.05 V
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=0.05
# %%
# define desired system
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# constructor takes keyword arguments
# c=array([0.1, 0.1]), z=array([ 1, -1]), L=1e-07, T=298.15, delta_u=0.05, relative_permittivity=79, vacuum_permittivity=8.854187817620389e-12, R=8.3144598, F=96485.33289
# with default values set for 0.1 mM NaCl aqueous solution across 100 nm and 0.05 V potential drop
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.output = True # lets the Newton solver display convergence plots
uij, nij, lamj = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 2: PNP interface system, 0.1 mM NaCl, negative potential u = -0.05 V, analytical solution as initial values
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=-0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.init()
# %%
# initial config
x = np.linspace(0, pnp.L, pnp.Ni)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
# %%
pnp.ni0 = C / pnp.c_unit # manually remove dimensions from analytical solution
# %%
ui0 = pnp.initial_values()
# %%
plt.plot(ui0) # solution to linear Poisson equation under assumption of fixed charge density distribution
# %%
pnp.output = True # lets the Newton solver display convergence plots
uij, nij, lamj = pnp.solve() # no faster convergence than above, compare convergence plots for test case 1
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 3: PNP interface system, 0.1 mM NaCl, positive potential u = 0.05 V, 200 nm domain
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=2e-07
delta_u=0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.init()
# %%
pnp.output = True
uij, nij, lamj = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# Analytic PB and approximate PNP solution indistinguishable.
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 4: 1D electrochemical cell, 0.1 mM NaCl, positive potential u = 0.05 V, 100 nm domain
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardCellBC()
# %%
pnp.init()
# %%
pnp.output = True
xij = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.set_xlabel('z [nm]')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xij1,0,0), pnp.numberConservationConstraint(pnp.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xi0,0,0), pnp.numberConservationConstraint(pnp.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp.numberConservationConstraint(pnp.xij1,0,
pnp.numberConservationConstraint(pnp.xi0,0,0)),
pnp.numberConservationConstraint(pnp.xij1,1,
pnp.numberConservationConstraint(pnp.xi0,1,0)) )
# %% [markdown]
# ## Test case 5: 1D electrochemical cell, 1 M NaCl, positive potential u = 0.05 V, 3 nm domain, 0.5 nm compact layer
# %% [markdown]
# At high potentials or bulk concentrations, pure PNP systems yield unphysically high concentrations and steep gradients close to the boundary, as an ion's finite size is not accounted for.
# In addition, high gradients can lead to convergence issues. This problem can be alleviated by assuming a Stern layer (compact layer) at the interface.
# This compact layer is parametrized by its thickness $\lambda_S$ and can be treated either explicitly, by prescribing a linear potential regime across the compact layer region,
# or implicitly, by parametrizing a compact layer of uniform charge density through Robin boundary conditions on the potential.
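# As a rough sketch of the implicit variant (assuming an ion-free compact layer and a sign convention that may differ from the one implemented in `useSternLayerCellBC`), a compact layer of thickness $\lambda_S$ at the left electrode relates the applied potential $u_0$ to the diffuse-layer potential $\phi$ via a Robin condition,
# $$ u_0 = \phi(0) - \lambda_S \left.\frac{\partial \phi}{\partial z}\right|_{z=0}, $$
# with the mirrored expression at the right electrode; the explicit variant instead resolves this linear potential drop on the computational grid.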
# %%
c = [1000,1000] # high concentration of 1 M, far above the dilute test cases
delta_u = 0.05
L = 30e-10 # tiny gap of 3 nm
lambda_S = 5e-10 # 0.5 nm Stern layer
# %%
pnp_no_compact_layer = PoissonNernstPlanckSystem(c,z,L,delta_u=delta_u, e=1e-12)
# %%
pnp_with_explicit_compact_layer = PoissonNernstPlanckSystem(c,z,L, delta_u=delta_u,lambda_S=lambda_S, e=1e-12)
# %%
pnp_with_implicit_compact_layer = PoissonNernstPlanckSystem(c,z,L, delta_u=delta_u,lambda_S=lambda_S, e=1e-12)
# %%
pnp_no_compact_layer.useStandardCellBC()
# %%
pnp_with_explicit_compact_layer.useSternLayerCellBC(implicit=False)
# %%
pnp_with_implicit_compact_layer.useSternLayerCellBC(implicit=True)
# %%
pnp_no_compact_layer.init()
# %%
pnp_with_explicit_compact_layer.init()
# %%
pnp_with_implicit_compact_layer.init()
# %%
pnp_no_compact_layer.output = True
xij_no_compact_layer = pnp_no_compact_layer.solve()
# %%
pnp_with_explicit_compact_layer.output = True
xij_with_explicit_compact_layer = pnp_with_explicit_compact_layer.solve()
# %%
pnp_with_implicit_compact_layer.output = True
xij_with_implicit_compact_layer = pnp_with_implicit_compact_layer.solve()
# %%
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[18,10])
# 1 - potentials
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.potential, marker='', color='tab:red', label='potential, without compact layer', linewidth=1, linestyle='-')
ax1.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.potential, marker='', color='tab:red', label='potential, with explicit compact layer', linewidth=1, linestyle='--')
ax1.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.potential, marker='', color='tab:red', label='potential, with Robin BC', linewidth=2, linestyle=':')
# 2 - concentrations
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, without compact layer', linewidth=2, linestyle='-')
ax2.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, without compact layer', linewidth=2, linestyle='-')
ax2.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with explicit compact layer', linewidth=2, linestyle='--')
ax2.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with explicit compact layer', linewidth=2, linestyle='--')
ax2.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with Robin BC', linewidth=2, linestyle=':')
ax2.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with Robin BC', linewidth=2, linestyle=':')
# 3 - charge densities
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.charge_density, label='charge density, without compact layer', color='grey', linewidth=1, linestyle='-')
ax3.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.charge_density, label='charge density, with explicit compact layer', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.charge_density, label='charge density, with Robin BC', color='grey', linewidth=1, linestyle=':')
# 4 - concentrations, semi log
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, without compact layer', linewidth=2, linestyle='-')
ax4.semilogy(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, without compact layer', linewidth=2, linestyle='-')
ax4.semilogy(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with explicit compact layer', linewidth=2, linestyle='--')
ax4.semilogy(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with explicit compact layer', linewidth=2, linestyle='--')
ax4.semilogy(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with Robin BC', linewidth=2, linestyle=':')
ax4.semilogy(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with Robin BC', linewidth=2, linestyle=':')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
#ax3.yaxis.set_major_formatter(formatter)
ax3.ticklabel_format(axis='y', style='sci', scilimits=(-2,10), useOffset=False, useMathText=False)
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=12)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=12)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=12)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp_no_compact_layer.potential[0],pnp_no_compact_layer.potential[-1])
# %%
(pnp_with_explicit_compact_layer.potential[0],pnp_with_explicit_compact_layer.potential[-1])
# %%
(pnp_with_implicit_compact_layer.potential[0],pnp_with_implicit_compact_layer.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp_no_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,0), pnp_no_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,0) )
# %%
( pnp_with_explicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,0), pnp_with_explicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,0) )
# %%
( pnp_with_implicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,0), pnp_with_implicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
( pnp_no_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,1), pnp_no_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,1) )
# %%
( pnp_with_explicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,1), pnp_with_explicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,1) )
# %%
( pnp_with_implicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,1), pnp_with_implicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp_no_compact_layer.concentration[0,0],pnp_no_compact_layer.concentration[0,-1])
# %%
(pnp_with_explicit_compact_layer.concentration[0,0],pnp_with_explicit_compact_layer.concentration[0,-1])
# %%
(pnp_with_implicit_compact_layer.concentration[0,0],pnp_with_implicit_compact_layer.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp_no_compact_layer.concentration[1,0],pnp_no_compact_layer.concentration[1,-1])
# %%
(pnp_with_explicit_compact_layer.concentration[1,0],pnp_with_explicit_compact_layer.concentration[1,-1])
# %%
(pnp_with_implicit_compact_layer.concentration[1,0],pnp_with_implicit_compact_layer.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,0,0), pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,1,0) )
# %%
( pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,0,0), pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,1,0) )
# %%
( pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,0,0), pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,0,0), pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,1,0) )
# %%
( pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,0,0), pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,1,0) )
# %%
( pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,0,0), pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,0,
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,0,0)),
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,1,
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,1,0)) )
# %%
(pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,0,
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,0,0)),
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,1,
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,1,0)) )
# %%
(pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,0,
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,0,0)),
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,1,
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,1,0)) )
# %% [markdown]
# ## Sample application of 1D electrochemical cell model:
# %% [markdown]
# We want to fill a gap of 5 nm between gold electrodes with aqueous NaCl solution at a mass fraction of 0.2 (about 20 wt %), apply a 0.5 V potential difference and generate an initial configuration for LAMMPS within a cubic box:
# %%
box_Ang=np.array([50.,50.,50.]) # Angstrom
# %%
box_m = box_Ang*sc.angstrom
# %%
box_m
# %%
vol_AngCube = box_Ang.prod() # Angstrom^3
# %%
vol_mCube = vol_AngCube*sc.angstrom**3
# %% [markdown]
# At a mass fraction of 0.2 (20 wt %), we are close to NaCl's solubility limit in water.
# We estimate molar concentrations and atom numbers in our box:
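# The conversion sketched here uses our own symbols rather than names from the code: for mass fraction $w$, solution mass density $\rho$, molar mass $M_\mathrm{NaCl}$, box volume $V$ and Avogadro constant $N_A$,
# $$ c_\mathrm{NaCl} = \frac{w\,\rho}{M_\mathrm{NaCl}}, \qquad N_\mathrm{NaCl} = c_\mathrm{NaCl}\, V\, N_A, $$
# which is what the following cells evaluate numerically.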
# %%
# enter a mass fraction between 0 ... 0.2
weight_concentration_NaCl = 0.2 # mass fraction (i.e. 20 wt %)
# estimate the saline solution mass density by linear interpolation (about 1.15 kg/L at a mass fraction of 0.2)
saline_mass_density_kg_per_L = 1 + weight_concentration_NaCl * 0.15 / 0.20 # g / cm^3, kg / L
# see https://www.engineeringtoolbox.com/density-aqueous-solution-inorganic-sodium-salt-concentration-d_1957.html
# %%
saline_mass_density_g_per_L = saline_mass_density_kg_per_L*sc.kilo
# %%
molar_mass_H2O = 18.015 # g / mol
molar_mass_NaCl = 58.44 # g / mol
# %%
cNaCl_M = weight_concentration_NaCl*saline_mass_density_g_per_L/molar_mass_NaCl # mol L^-1
# %%
cNaCl_mM = np.round(cNaCl_M/sc.milli) # mM
# %%
cNaCl_mM
# %%
n_NaCl = np.round(cNaCl_mM*vol_mCube*sc.value('Avogadro constant'))
# %%
n_NaCl
# %%
c = [cNaCl_mM,cNaCl_mM]
z = [1,-1]
L=box_m[2]
lambda_S = 2.0e-10 # 0.2 nm Stern layer
delta_u = 0.5
# %%
pnp = PoissonNernstPlanckSystem(c,z,L, lambda_S=lambda_S, delta_u=delta_u, N=200, maxit=20, e=1e-6)
# %%
pnp.useSternLayerCellBC()
# %%
pnp.init()
# %%
pnp.output = True
xij = pnp.solve()
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.set_xlabel('z [nm]')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xij1,0,0), pnp.numberConservationConstraint(pnp.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xi0,0,0), pnp.numberConservationConstraint(pnp.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp.numberConservationConstraint(pnp.xij1,0,
pnp.numberConservationConstraint(pnp.xi0,0,0)),
pnp.numberConservationConstraint(pnp.xij1,1,
pnp.numberConservationConstraint(pnp.xi0,1,0)) )
# %% [markdown]
# ## Sampling
# First, convert the physical concentration distributions into a callable "probability density":
# %%
pnp.concentration.shape
# %%
distributions = [interpolate.interp1d(pnp.grid,pnp.concentration[i,:]) for i in range(pnp.concentration.shape[0])]
# %% [markdown]
# Normalization is not necessary here. Now we can sample the distribution of our $Na^+$ ions in z-direction.
# %%
na_coordinate_sample = continuous2discrete(
distribution=distributions[0], box=box_m, count=n_NaCl)
histx, histy, histz = get_histogram(na_coordinate_sample, box=box_m, n_bins=51)
plot_dist(histz, 'Distribution of Na+ ions in z-direction', reference_distribution=distributions[0])
# %%
cl_coordinate_sample = continuous2discrete(
distributions[1], box=box_m, count=n_NaCl)
histx, histy, histz = get_histogram(cl_coordinate_sample, box=box_m, n_bins=51)
plot_dist(histx, 'Distribution of Cl- ions in x-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box_m[0])
plot_dist(histy, 'Distribution of Cl- ions in y-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box_m[1])
plot_dist(histz, 'Distribution of Cl- ions in z-direction', reference_distribution=distributions[1])
# %% [markdown]
# ## Write to file
# To visualize our sampled coordinates, we use ASE to export them to a standard format, e.g. .xyz or a LAMMPS data file.
# ASE uses Ångström by default, thus we convert from SI units:
# %%
sample_size = int(n_NaCl)
# %%
sample_size
# %%
na_atoms = ase.Atoms(
symbols='Na'*sample_size,
charges=[1]*sample_size,
positions=na_coordinate_sample/sc.angstrom,
cell=box_Ang,
pbc=[1,1,0])
cl_atoms = ase.Atoms(
symbols='Cl'*sample_size,
charges=[-1]*sample_size,
positions=cl_coordinate_sample/sc.angstrom,
cell=box_Ang,
pbc=[1,1,0])
system = na_atoms + cl_atoms
system
ase.io.write('NaCl_c_4_M_u_0.5_V_box_5x5x5nm_lambda_S_2_Ang.xyz',system,format='xyz')
# %%
# LAMMPS data format, units 'real', atom style 'full'
# before ASE 3.19.0b1, ASE had issues with exporting atom style 'full' in LAMMPS data file format, so do not expect this line to work for older ASE versions
ase.io.write('NaCl_c_4_M_u_0.5_V_box_5x5x5nm_lambda_S_2_Ang.lammps',system,format='lammps-data',units="real",atom_style='full')
| gpl-2.0 |
DangoMelon0701/PyRemote-Sensing | Scatter Plot/np_scatter_plot.py | 1 | 7491 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 06 13:17:45 2016
@author: gerar
"""
# This script takes as input an ASCII file
# with 5 columns:
# Date_MODIS Time_MODIS AOD_MODIS Time_AERONET AOD_AERONET
# It can be used together with the script called
# pd_match_data.py
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
#from datetime import timedelta as delt
from scipy.stats.stats import pearsonr
#%%
#Define the maximum values of the plot axes
x_limit = 1
y_limit = 1
#%%
#Station data
AERONET_station = "Granada"
years_range="2002-2017"
x_label = r"AERONET Level 2.0 AOD $\mu m$"
y_label = "MODIS TERRA AOD at 10km"
#%%
#Number of pixels to average over
grid = "3x3"
#%%
#Set a LaTeX-style font
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
#%%
#ref: https://stackoverflow.com/questions/25271388/python-percentage-rounding
def round_to_100_percent(number_set, digit_after_decimal=2):
"""
This function take a list of number and return a list of percentage, which represents the portion of each number in sum of all numbers
Moreover, those percentages are adding up to 100%!!!
Notice: the algorithm we are using here is 'Largest Remainder'
The down-side is that the results won't be accurate, but they are never accurate anyway:)
"""
unround_numbers = [x / float(sum(number_set)) * 100 * 10 ** digit_after_decimal for x in number_set]
decimal_part_with_index = sorted([(index, unround_numbers[index] % 1) for index in range(len(unround_numbers))], key=lambda y: y[1], reverse=True)
remainder = 100 * 10 ** digit_after_decimal - sum([int(x) for x in unround_numbers])
index = 0
while remainder > 0:
unround_numbers[decimal_part_with_index[index][0]] += 1
remainder -= 1
index = (index + 1) % len(number_set)
return [int(x) / float(10 ** digit_after_decimal) for x in unround_numbers]
#%%
#Define the rmse and mae functions
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
def mae(predictions,targets):
return np.abs((predictions - targets)).mean()
#%%
#Find the txt file to plot
file_to_plot = [x for x in os.listdir(os.getcwd()) \
if x.endswith("{}{}_matched_data.txt".format(\
AERONET_station.lower().replace(" ",""),grid))]
#Open the file and store the data in numpy.arrays (dtype=float)
modis_data,aeronet_data = np.loadtxt(file_to_plot[0],skiprows = 1,usecols=(2,4),unpack=True)
#Read the data to plot a time series
data = pd.read_table(file_to_plot[0], parse_dates=[0],infer_datetime_format = True,usecols=(0,2,4)) #, parse_dates=[0],infer_datetime_format = True
data = data.dropna() #[pd.notnull(data['AOD_AERONET'])]
data = data.set_index('Date_MODIS')
print "Archivo Leido\nProcesando ..."
np.seterr(all='ignore')
#%%
#Compute the expected error (EE)
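# The +/-(0.05 + 0.15*AOD) band used below is the commonly reported MODIS
# over-land expected error envelope for Dark Target aerosol retrievals.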
_ee = np.abs(0.05 + 0.15*aeronet_data)
ee_plus = aeronet_data + _ee
ee_minus = aeronet_data - _ee
#1- Within the EE:
within_ee = modis_data[np.logical_and(ee_minus<modis_data,modis_data<ee_plus)]
xwithin_ee = aeronet_data[np.logical_and(ee_minus<modis_data,modis_data<ee_plus)]
#2- Above the EE:
above_ee = modis_data[ee_plus<modis_data]
xabove_ee = aeronet_data[ee_plus<modis_data]
#3- Below the EE:
below_ee = modis_data[modis_data<ee_minus]
xbelow_ee = aeronet_data[modis_data<ee_minus]
#--Total number of points--
n_tot=len(above_ee)+len(within_ee)+len(below_ee)
#%%
#Compute the parameters of the EE+ and EE- lines
idx = np.isfinite(aeronet_data)
m_plus,b_plus = np.polyfit(aeronet_data[idx],ee_plus[idx],1)
m_minus,b_minus = np.polyfit(aeronet_data[idx],ee_minus[idx],1)
#%%
#Compute the correlation coefficient R of the plot
r_coef = pearsonr(aeronet_data[idx],modis_data[idx])
#Compute the RMSE
rmse_value = rmse(modis_data[idx],aeronet_data[idx])
#Compute the MAE
mae_value = mae(modis_data[idx],aeronet_data[idx])
#%%
#Compute the FOE (fraction of the expected error) for another plot
FOE = (modis_data-aeronet_data) / _ee
FOE = FOE[np.isfinite(FOE)]
data['FOE']=FOE
data = data.sort_index()
#%%
print "Realizando la grafica...\n"
#Points to draw the 1:1 line
_11line = np.linspace(0,x_limit,2)
#Make the scatter plot of the pd.dataframe
fig, axes = plt.subplots(figsize=(7,7))
#axes.scatter(aeronet_data,modis_data)
#Compute the percentages:
len_list = [len(above_ee),len(within_ee),len(below_ee)]
percnt_list = round_to_100_percent(len_list)
percnt_list = [x/100. for x in percnt_list]
#Plot the data: above EE, within EE and below EE
axes.scatter(xabove_ee,above_ee,edgecolors="black",linewidth=1,s=10,c="blue",label="%\tAbove EE\t=\t{:.2%}".format(percnt_list[0]))
axes.scatter(xwithin_ee,within_ee,edgecolors="black",linewidth=1,s=10,c="red",label="%\tWithin EE\t=\t{:.2%}".format(percnt_list[1]))
axes.scatter(xbelow_ee,below_ee,edgecolors="black",linewidth=1,s=10,c="green",label="%\tBelow EE\t=\t{:.2%}".format(percnt_list[2]))
#Insert the plot legend
axes.legend(loc=2,scatterpoints=1,labelspacing=0.5,fontsize=13.5,handlelength=0.6,frameon=False)
#Draw the 1:1, EE+ and EE- lines
axes.plot(_11line,_11line, color ='black',lw=0.6)
axes.plot(_11line,m_plus*_11line+b_plus,color = 'black', ls='--')
axes.plot(_11line,m_minus*_11line+b_minus,color = 'black', ls='--')
#Axis labels
axes.set_ylabel(y_label,fontsize=19)
axes.set_xlabel(x_label,fontsize=19)
#Station name with the year range
axes.text(0.75,0.2,"({})\n\n({})".format(AERONET_station,years_range),\
ha="center",va="center",fontsize=17,transform=axes.transAxes)
axes.text(0.075,0.785,"(R={:.3f}, RMSE={:.3f})".format(r_coef[0],rmse_value),fontsize=13.5,\
transform=axes.transAxes)
axes.text(0.075,0.74,"(MAE={:.3f}, N={})".format(mae_value,n_tot),fontsize=13.5,\
transform=axes.transAxes)
#Plot limits with grid
axes.axis([0,x_limit,0,y_limit],fontsize=10)
axes.tick_params(labelsize=13)
axes.grid(linestyle='--')
print "Guardando las imagenes en el directorio actual"
##Guardo el archivo en jpeg
fig.savefig("{}_AOD.jpeg".format(AERONET_station),dpi=1000,bbox_inches='tight')
print "\nScatter Plot Guardado"
##%%
##Get the dates
#dates = data.index.to_pydatetime()
#
##Plot the time series
#fig2, axes2 = plt.subplots(figsize=(8,3))
#
##y-axis limits with the tick steps to show
#axes2.set_ylim([-2.5,2.5])
##axes2.set_xlim([data.index.to_pydatetime().min(),data.index.to_pydatetime().max()])
#major_yticks=np.arange(-2.5,3,0.5)
#axes2.set_yticks(major_yticks)
#
##Horizontal lines
#t_delta = delt(20)
#axes2.hlines([1,0,-1],xmin=dates.min()-t_delta,xmax=dates.max()+t_delta,linestyle='dotted')
#
##Data
#axes2.plot(dates,data['FOE'],'v-',linewidth='0',label='MYD04_L2 AOD')
#axes2.legend(loc=9,numpoints=1,fontsize = 9,fancybox=True)
#
##Axis labels
#axes2.set_xlabel("Date",fontsize=12)
#axes2.set_ylabel("Fraction of the Expected Error (FOE)",fontsize=12)
#axes2.set_title("{} {}".format(AERONET_station,years_range))
#
#fig2.autofmt_xdate()
#
#
#
#plt.show()
###Save the figure as jpeg
#fig2.savefig("{}_FOE.jpeg".format(AERONET_station),dpi=1000)
#print "\nFOE guardado"
#print "Proceso Terminado\n"
##os.system("pause")
| mit |
Eksmo/calibre | setup/installer/linux/freeze.py | 2 | 11661 | #!/usr/bin/env python
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Create linux binary.
'''
from setup import Command, __version__, __appname__
class LinuxFreeze(Command):
description = 'Create frozen linux binary'
def run(self, opts):
import glob, sys, tarfile, os, textwrap, shutil, platform
from contextlib import closing
from cx_Freeze import Executable, setup
from calibre.linux import entry_points
from calibre import walk
is64bit = platform.architecture()[0] == '64bit'
arch = 'x86_64' if is64bit else 'i686'
ffi = '/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.2/libffi.so.4' if is64bit else '/usr/lib/gcc/i686-pc-linux-gnu/4.4.1/libffi.so.4'
stdcpp = '/usr/lib/gcc/%s-pc-linux-gnu/%s/libstdc++.so.6'%(arch, '4.4.2'
if is64bit else '4.4.1')
QTDIR = '/usr/lib/qt4'
QTDLLS = ('QtCore', 'QtGui', 'QtNetwork', 'QtSvg', 'QtXml',
'QtWebKit', 'QtDBus', 'QtXmlPatterns')
binary_excludes = ['libGLcore*', 'libGL*', 'libnvidia*']
os.system('sudo cp /usr/bin/calibre-mount-helper /tmp/calibre-mount-helper')
os.system('sudo chown kovid:users /tmp/calibre-mount-helper')
binary_includes = [
'/usr/bin/pdftohtml',
'/usr/lib/libwmflite-0.2.so.7',
'/usr/lib/liblcms.so.1',
'/usr/lib/liblcms2.so.2',
'/usr/lib/libstlport.so.5.1',
'/tmp/calibre-mount-helper',
'/usr/lib/libunrar.so',
'/usr/lib/libchm.so.0',
'/usr/lib/libsqlite3.so.0',
'/usr/lib/libmng.so.1',
'/usr/lib/libpodofo.so.0.8.2',
'/lib/libz.so.1',
'/lib/libuuid.so.1',
'/usr/lib/libtiff.so.5',
'/lib/libbz2.so.1',
'/usr/lib/libpoppler.so.6',
'/usr/lib/libxml2.so.2',
'/usr/lib/libopenjpeg.so.2',
'/usr/lib/libxslt.so.1',
'/usr/lib/libjpeg.so.7',
'/usr/lib/libxslt.so.1',
'/usr/lib/libgthread-2.0.so.0',
stdcpp,
ffi,
'/usr/lib/libpng14.so.14',
'/usr/lib/libexslt.so.0',
'/usr/lib/libMagickWand.so.3',
'/usr/lib/libMagickCore.so.3',
'/usr/lib/libgcrypt.so.11',
'/usr/lib/libgpg-error.so.0',
'/usr/lib/libphonon.so.4',
'/usr/lib/libssl.so.0.9.8',
'/usr/lib/libcrypto.so.0.9.8',
'/lib/libreadline.so.6',
]
binary_includes += [os.path.join(QTDIR, 'lib%s.so.4'%x) for x in QTDLLS]
CALIBRESRC = self.d(self.SRC)
CALIBREPLUGINS = os.path.join(CALIBRESRC, 'src', 'calibre', 'plugins')
FREEZE_DIR = os.path.join(CALIBRESRC, 'build', 'cx_freeze')
DIST_DIR = os.path.join(CALIBRESRC, 'dist')
os.chdir(CALIBRESRC)
self.info('Freezing calibre located at', CALIBRESRC)
entry_points = entry_points['console_scripts'] + entry_points['gui_scripts']
entry_points = ['calibre_postinstall=calibre.linux:main'] + entry_points
executables = {}
for ep in entry_points:
executables[ep.split('=')[0].strip()] = (ep.split('=')[1].split(':')[0].strip(),
ep.split(':')[-1].strip())
if os.path.exists(FREEZE_DIR):
shutil.rmtree(FREEZE_DIR)
os.makedirs(FREEZE_DIR)
if not os.path.exists(DIST_DIR):
os.makedirs(DIST_DIR)
includes = [x[0] for x in executables.values()]
includes += ['email.iterators', 'email.generator', 'sqlite3.dump']
excludes = ['matplotlib', "Tkconstants", "Tkinter", "tcl", "_imagingtk",
"ImageTk", "FixTk", 'wx', 'PyQt4.QtAssistant', 'PyQt4.QtOpenGL.so',
'PyQt4.QtScript.so', 'PyQt4.QtSql.so', 'PyQt4.QtTest.so', 'qt',
'glib', 'gobject']
packages = ['calibre', 'encodings', 'cherrypy', 'cssutils', 'xdg',
'dateutil', 'dns', 'email', 'dbus']
includes += ['calibre.gui2.convert.'+x.split('/')[-1].rpartition('.')[0] for x in \
glob.glob('src/calibre/gui2/convert/*.py')]
includes += ['calibre.gui2.catalog.'+x.split('/')[-1].rpartition('.')[0] for x in \
glob.glob('src/calibre/gui2/catalog/*.py')]
includes += ['calibre.gui2.actions.'+x.split('/')[-1].rpartition('.')[0] for x in \
glob.glob('src/calibre/gui2/actions/*.py')]
includes += ['calibre.gui2.preferences.'+x.split('/')[-1].rpartition('.')[0] for x in \
glob.glob('src/calibre/gui2/preferences/*.py')]
LOADER = '/tmp/loader.py'
open(LOADER, 'wb').write('# This script is never actually used.\nimport sys')
INIT_SCRIPT = '/tmp/init.py'
open(INIT_SCRIPT, 'wb').write(textwrap.dedent('''
## Load calibre module specified in the environment variable CALIBRE_CX_EXE
## Also restrict sys.path to the executables' directory and add the
## executables directory to LD_LIBRARY_PATH
import encodings
import os
import sys
import warnings
import zipimport
import locale
import codecs
enc = locale.getdefaultlocale()[1]
if not enc:
enc = locale.nl_langinfo(locale.CODESET)
enc = codecs.lookup(enc if enc else 'UTF-8').name
sys.setdefaultencoding(enc)
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
if DIR_NAME not in paths or not sys.getfilesystemencoding():
paths.insert(0, DIR_NAME)
os.environ['LD_LIBRARY_PATH'] = os.pathsep.join(paths)
os.environ['PYTHONIOENCODING'] = enc
os.execv(sys.executable, sys.argv)
sys.path = sys.path[:3]
sys.frozen = True
sys.frozen_path = DIR_NAME
sys.extensions_location = os.path.join(DIR_NAME, 'plugins')
sys.resources_location = os.path.join(DIR_NAME, 'resources')
dfv = os.environ.get('CALIBRE_DEVELOP_FROM', None)
if dfv and os.path.exists(dfv):
sys.path.insert(0, os.path.abspath(dfv))
executables = %(executables)s
exe = os.environ.get('CALIBRE_CX_EXE', False)
ret = 1
if not exe:
print >>sys.stderr, 'Invalid invocation of calibre loader. CALIBRE_CX_EXE not set'
elif exe not in executables:
print >>sys.stderr, 'Invalid invocation of calibre loader. CALIBRE_CX_EXE=%%s is unknown'%%exe
else:
sys.argv[0] = exe
module, func = executables[exe]
module = __import__(module, fromlist=[1])
func = getattr(module, func)
ret = func()
module = sys.modules.get("threading")
if module is not None:
module._shutdown()
sys.exit(ret)
''')%dict(executables=repr(executables)))
sys.argv = ['freeze', 'build_exe']
setup(
name = __appname__,
version = __version__,
executables = [Executable(script=LOADER, targetName='loader', compress=False)],
options = { 'build_exe' :
{
'build_exe' : os.path.join(CALIBRESRC, 'build/cx_freeze'),
'optimize' : 2,
'excludes' : excludes,
'includes' : includes,
'packages' : packages,
'init_script' : INIT_SCRIPT,
'copy_dependent_files' : True,
'create_shared_zip' : False,
}
}
)
def copy_binary(src, dest_dir):
dest = os.path.join(dest_dir, os.path.basename(src))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.realpath(src), dest)
shutil.copymode(os.path.realpath(src), dest)
for f in binary_includes:
copy_binary(f, FREEZE_DIR)
for pat in binary_excludes:
matches = glob.glob(os.path.join(FREEZE_DIR, pat))
for f in matches:
os.remove(f)
self.info('Adding ImageMagick...')
im = glob.glob('/usr/lib/ImageMagick-*')[0]
dest = os.path.join(FREEZE_DIR, 'ImageMagick')
shutil.copytree(im, dest)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.a'):
os.remove(os.path.join(x[0], f))
self.info('Adding calibre plugins...')
os.makedirs(os.path.join(FREEZE_DIR, 'plugins'))
for f in glob.glob(os.path.join(CALIBREPLUGINS, '*.so')):
copy_binary(f, os.path.join(FREEZE_DIR, 'plugins'))
self.info('Adding calibre resources...')
shutil.copytree('resources', os.path.join(FREEZE_DIR, 'resources'))
self.info('Adding Qt plugins...')
plugdir = os.path.join(QTDIR, 'plugins')
for dirpath, dirnames, filenames in os.walk(plugdir):
for f in filenames:
if not f.endswith('.so') or 'designer' in dirpath or 'codecs' in dirpath or 'sqldrivers' in dirpath:
continue
f = os.path.join(dirpath, f)
dest_dir = dirpath.replace(plugdir, os.path.join(FREEZE_DIR, 'qtplugins'))
copy_binary(f, dest_dir)
self.info('Creating launchers')
for exe in executables:
path = os.path.join(FREEZE_DIR, exe)
open(path, 'wb').write(textwrap.dedent('''\
#!/bin/sh
export CALIBRE_CX_EXE=%s
path=`readlink -e $0`
base=`dirname $path`
loader=$base/loader
export LD_LIBRARY_PATH=$base:$LD_LIBRARY_PATH
export MAGICK_CONFIGURE_PATH=$base/ImageMagick/config
export MAGICK_CODER_MODULE_PATH=$base/ImageMagick/modules-Q16/coders
export MAGICK_CODER_FILTER_PATH=$base/ImageMagick/modules-Q16/filter
export QT_PLUGIN_PATH=$base/qtplugins:$QT_PLUGIN_PATH
$loader "$@"
''')%exe)
os.chmod(path, 0755)
exes = list(executables.keys())
exes.remove('calibre_postinstall')
open(os.path.join(FREEZE_DIR, 'manifest'), 'wb').write('\n'.join(exes))
self.info('Creating archive...')
dist = open(os.path.join(DIST_DIR, 'calibre-%s-%s.tar.bz2'%(__version__,
arch)), 'wb')
with closing(tarfile.open(fileobj=dist, mode='w:bz2',
format=tarfile.PAX_FORMAT)) as tf:
for f in walk(FREEZE_DIR):
name = f.replace(FREEZE_DIR, '')[1:]
if name:
tf.add(f, name)
dist.flush()
dist.seek(0, 2)
self.info('Archive %s created: %.2f MB'%(dist.name,
dist.tell()/(1024.**2)))
| gpl-3.0 |
emetsger/osf.io | scripts/analytics/addons.py | 18 | 2173 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
for record in collection.find({}, {_id: True})
]
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
maheshakya/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/exceptions.py | 50 | 5276 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'SkipTestWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior.
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
.. versionadded:: 0.18
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
.. versionchanged:: 0.18
Moved from sklearn.cross_validation.
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation, extends EfficiencyWarning.
"""
class SkipTestWarning(UserWarning):
"""Warning class used to notify the user of a test that was skipped.
For example, one of the estimator checks requires a pandas import.
If the pandas package cannot be imported, the test will be skipped rather
than register as a failure.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
| bsd-3-clause |
dgwakeman/mne-python | examples/realtime/ftclient_rt_compute_psd.py | 17 | 2460 | """
==============================================================
Compute real-time power spectrum density with FieldTrip client
==============================================================
Please refer to `ftclient_rt_average.py` for instructions on
how to get the FieldTrip connector working in MNE-Python.
This example demonstrates how to use it for continuous
computation of power spectra in real-time using the
get_data_as_epoch function.
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import FieldTripClient
from mne.time_frequency import compute_epochs_psd
print(__doc__)
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
fig, ax = plt.subplots(1)
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=False, include=[], exclude=bads)
n_fft = 256 # the FFT size. Ideally a power of 2
n_samples = 2048 # time window on which to compute FFT
for ii in range(20):
epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
psd, freqs = compute_epochs_psd(epoch, fmin=2, fmax=200, n_fft=n_fft)
cmap = 'RdBu_r'
freq_mask = freqs < 150
freqs = freqs[freq_mask]
log_psd = 10 * np.log10(psd[0])
tmin = epoch.events[0][0] / raw_info['sfreq']
tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
if ii == 0:
im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
origin='lower', cmap=cmap)
ax.set_yticks(np.arange(0, len(freqs), 10))
ax.set_yticklabels(freqs[::10].round(1))
ax.set_xlabel('Frequency (Hz)')
ax.set_xticks(np.arange(0, len(picks), 30))
ax.set_xticklabels(picks[::30])
ax.set_xlabel('MEG channel index')
im.set_clim()
else:
im.set_data(log_psd[:, freq_mask].T)
plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
% (tmin, tmax), fontsize=10)
plt.pause(0.5)
plt.close()
| bsd-3-clause |
effigies/mne-python | examples/stats/plot_spatio_temporal_cluster_stats_sensor.py | 1 | 5435 | """
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.Raw(raw_fname, preload=True)
raw.filter(1, 30)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id, copy=False)
condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
X = [epochs[k].get_data() for k in condition_names] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
# load FieldTrip neighbor definition to setup sensor connectivity
from mne.channels import read_ch_connectivity
connectivity, ch_names = read_ch_connectivity('neuromag306mag')
###############################################################################
# Compute statistic
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=2,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
###############################################################################
# Visualize clusters
# load viz functionality
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
# configure variables for visualization
times = epochs.times * 1e3
colors = 'r', 'r', 'steelblue', 'steelblue'
linestyles = '-', '--', '-', '--'
# grand average as numpy array
grand_ave = np.array(X).mean(axis=1)
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# loop over significant clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at significant sensors
signals = grand_ave[..., ch_inds].mean(axis=-1)
sig_times = times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
title = 'Cluster #{0}'.format(i_clu + 1)
fig.suptitle(title, fontsize=14)
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axis=ax_topo,
cmap='Reds', vmin=np.min, vmax=np.max)
# advanced matplotlib for showing image with figure and colorbar
# in one plot
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
*sig_times[[0, -1]]
))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
for signal, name, col, ls in zip(signals, condition_names, colors,
linestyles):
ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)
# add information
ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
ax_signals.set_xlim([times[0], times[-1]])
ax_signals.set_xlabel('time [ms]')
ax_signals.set_ylabel('evoked magnetic fields [fT]')
# plot significant time range
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
ax_signals.legend(loc='lower right')
ax_signals.set_ylim(ymin, ymax)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the performance
on the test set is optimal within a range of values of the regularization
parameter. The example uses an Elastic-Net regression model, and the
performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/IPython/external/qt_for_kernel.py | 9 | 2379 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from IPython.utils.warn import warn
from IPython.utils.version import check_version
from IPython.external.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from IPython.external.qt import QtCore, QtGui, QtSvg, QT_API
| bsd-3-clause |
nicholsn/ncanda-data-integration | scripts/redcap/scoring/psqi/__init__.py | 2 | 2693 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
import re
import pandas
import RwrapperNew
#
# Variables from surveys needed for PSQI
#
# LimeSurvey field names
lime_fields = [ "psqi1", "psqi2", "psqi3", "psqi4", "psqi_set1 [psqi5a]", "psqi_set1 [psqi5b]", "psqi_set1 [psqi5c]", "psqi_set1 [psqi5d]", "psqi_set1 [psqi5e]", "psqi_set2 [psqi5f]", "psqi_set2 [psqi5g]", "psqi_set2 [psqi5h]",
"psqi_set2 [psqi5i]", "psqi5j", "psqi5jc", "psqi6", "psqi_set3 [psqi7]", "psqi_set3 [psqi8]", "psqi9" ]
# Dictionary to recover LimeSurvey field names from REDCap names
rc2lime = dict()
for field in lime_fields:
rc2lime[RwrapperNew.label_to_sri( 'youthreport2', field )] = field
# REDCap fields names
input_fields = { 'youthreport2' : [ 'youth_report_2_complete', 'youthreport2_missing' ] + rc2lime.keys() }
#
# This determines the name of the form in REDCap where the results are posted.
#
output_form = 'clinical'
#
# PSQI field names mapping from R to REDCap
#
R2rc = { 'PSQI' : 'psqi_total' }
for field in [ 'PSQIDURAT', 'PSQIDISTB', 'PSQILATEN' ,'PSQIDAYDYS', 'PSQIHSE', 'PSQISLPQUAL', 'PSQIMEDS' ]:
R2rc[field] = re.sub( 'psqi', 'psqi_', field.lower() )
#
# Scoring function - take requested data (as requested by "input_fields") for each (subject,event), and demographics (date of birth, gender) for each subject.
#
def compute_scores( data, demographics ):
# Get rid of all records that don't have YR2
    data = data.dropna( axis=0, subset=['youth_report_2_complete'] )
data = data[ data['youth_report_2_complete'] > 0 ]
data = data[ ~(data['youthreport2_missing'] > 0) ]
# If no records to score, return empty DF
if len( data ) == 0:
return pandas.DataFrame()
# Replace all column labels with the original LimeSurvey names
data.columns = RwrapperNew.map_labels( data.columns, rc2lime )
# Call the scoring function for all table rows
scores = data.apply( RwrapperNew.runscript, axis=1, Rscript='psqi/PSQI.R' )
# Replace all score columns with REDCap field names
scores.columns = RwrapperNew.map_labels( scores.columns, R2rc )
# Simply copy completion status from the input surveys
scores['psqi_complete'] = data['youth_report_2_complete'].map( int )
# Make a proper multi-index for the scores table
scores.index = pandas.MultiIndex.from_tuples(scores.index)
scores.index.names = ['study_id', 'redcap_event_name']
# Return the computed scores - this is what will be imported back into REDCap
outfield_list = [ 'psqi_complete' ] + R2rc.values()
return scores[ outfield_list ]
| bsd-3-clause |
heidi-ann/ocaml-raft-data | plotting_funct.py | 1 | 3255 | from matplotlib import rc
import matplotlib.pyplot as plt
import matplotlib.pylab as pyl
import numpy as np
import csv
from itertools import cycle
from utils import *
figs = ['org','expo','combo','fixed']
graph_a = ['12-24','25-50','50-100','100-200','150-300']
graph_b = ['150-151','150-155','150-175','150-200','150-300']
graph_a_labels = ['12--24 ms','25--50 ms','50--100 ms','100--200 ms','150--300 ms']
graph_b_labels = ['150--151 ms','150--155 ms','150--175 ms','150--200 ms','150--300 ms']
lines = ["-","--","-.",":"]
def plot_graph_a (data_x,data_y,name):
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
# for line colours
linecycler = cycle(lines)
for line in graph_a:
if (len(data_x[line]) != len(data_y)):
print (line)
axes.plot(data_x[line],data_y,next(linecycler), lw=1.0)
# labels
axes.set_xlabel('Time to elect leader [ms]',fontsize=9)
axes.set_ylabel('Cumulative percent',fontsize=9)
if "diago" in name:
axes.set_title('\\textbf{Original}~(Ongaro and Ousterhout)',fontsize=9)
elif not("combo" in name) and not ("expo" in name) and not ("fixed" in name):
axes.set_title('\\textbf{Reproduction}',fontsize=9)
# ticks & axes
x_marked = range(0, 401, 50)
axes.set_xticks(x_marked)
axes.set_xticklabels([str(x) for x in x_marked])
y_marked = [0,20,40,60,80,100]
axes.set_yticks(y_marked)
axes.set_yticklabels(['0%','20%','40%','60%','80%','100%'])
axes.set_xlim([0,400])
axes.legend(graph_a_labels,loc=4,frameon=False,handlelength=2.5)
fig.show()
fig_to_file(fig,'graph_a_'+name,'pdf')
# In[ ]:
def plot_graph_b(data_x,data_y,name,x_min=0,x_max=10000,log=True,letter=''):
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1)
linecycler = cycle(lines)
for line in graph_b:
if (len(data_x[line]) != len(data_y)):
print (line)
            print (len(data_y))
axes.plot(data_x[line],data_y,next(linecycler), lw=1.0)
# labels
xlabel = 'Time to elect leader'
if log:
xlabel += ' [ms, $\log_{10}$ scale]'
else:
xlabel += ' [ms]'
axes.set_xlabel(xlabel,fontsize=9)
axes.set_ylabel('Cumulative percent',fontsize=9)
if "diago" in name:
axes.set_title('\\textbf{Original}~(Ongaro and Ousterhout)',fontsize=9)
elif not("combo" in name) and not ("expo" in name) and not ("fixed" in name):
axes.set_title('\\textbf{Reproduction}',fontsize=9)
# ticks & axes
if (log):
axes.set_xscale("log")
x_marked = [75, 150, 300, 1000, 3000, 10000]
axes.set_xticks(x_marked)
axes.set_xticklabels(x_marked)
axes.set_xlim([75,x_max])
else:
x_marked = range(0, 401, 50)
axes.set_xticks(x_marked)
axes.set_xticklabels([str(x) for x in x_marked])
axes.set_xlim([x_min,x_max])
y_marked = [0,20,40,60,80,100]
axes.set_yticks(y_marked)
axes.set_yticklabels(['0%','20%','40%','60%','80%','100%'])
axes.legend(graph_a_labels,loc=4,frameon=False,handlelength=2.5)
fig.show()
fig_to_file(fig,'graph_b_'+name,'pdf')
| mit |
russel1237/scikit-learn | sklearn/ensemble/gradient_boosting.py | 12 | 69795 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.validation import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
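        # The constant score that minimizes the binomial deviance is the
        # log-odds of the positive class, log(pos / neg); ``scale`` is 1.0
        # here and 0.5 in ``ScaledLogOddsEstimator`` (used by the
        # exponential loss).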
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression estimates the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1``, output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1, output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
        if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on the testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on the testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in for each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
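    Examples
    --------
    A minimal usage sketch (illustrative only; the dataset and parameter
    choices are arbitrary):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> X, y = make_classification(random_state=0)
    >>> clf = GradientBoostingClassifier(n_estimators=50,
    ...     random_state=0).fit(X, y)
    >>> clf.predict(X[:2]).shape
    (2,)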
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining error on the testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining error on the testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining error on the testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in, for each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
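# A minimal, hedged usage sketch of the regressor documented above, assuming
# the standard scikit-learn estimator API; parameter values are illustrative
# only.  Kept as a private helper so it never runs on import.
def _gradient_boosting_regressor_sketch():
    import numpy as np

    rng = np.random.RandomState(42)
    X = rng.rand(300, 3)
    y = np.sin(3 * X[:, 0]) + 0.1 * rng.randn(300)
    X_train, X_test = X[:200], X[200:]
    y_train, y_test = y[:200], y[200:]

    est = GradientBoostingRegressor(loss='huber', alpha=0.9,
                                    n_estimators=100, learning_rate=0.1,
                                    max_depth=3, subsample=0.8,
                                    max_features='sqrt', random_state=0)
    est.fit(X_train, y_train)

    preds = est.predict(X_test)        # shape (n_test,)

    # Held-out MSE after each stage (see staged_predict above).
    staged_mse = [np.mean((y_test - y_hat) ** 2)
                  for y_hat in est.staged_predict(X_test)]

    # Leaf index of every test sample in every fitted tree (see apply above);
    # shape (n_test, n_estimators).
    leaves = est.apply(X_test)
    return preds, staged_mse, leaves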
| bsd-3-clause |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/tools/tests/test_merge.py | 3 | 98825 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
import pandas as pd
from pandas.compat import range, lrange, lzip, zip, StringIO
from pandas import compat
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv
import pandas.algos as algos
import pandas.util.testing as tm
a_ = np.array
N = 50
NGROUPS = 8
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = lrange(ngroups)
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
random.shuffle(arr)
return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
self.right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = algos.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = algos.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = algos.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key1.bar', joined)
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key2.bar', joined)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
self.assert_numpy_array_equal(merged['MergedA'], target['A'])
self.assert_numpy_array_equal(merged['MergedD'], target['D'])
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
self.assertTrue(np.isnan(joined['two']['c']))
self.assertTrue(np.isnan(joined['three']['c']))
# merge column not present
self.assertRaises(KeyError, target.join, source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
self.assertRaises(ValueError, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
self.assertIn(col, merged)
self.assertTrue(merged[col].isnull().all())
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
self.assertTrue(merged2.columns.equals(merged.columns))
self.assertEqual(len(merged2), 0)
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notnull()]
self.assert_numpy_array_equal(joined['key'], expected['key'])
self.assert_numpy_array_equal(joined['value'], expected['value'])
self.assertTrue(joined.index.equals(expected.index))
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
self.assertEqual(df1['B'].dtype, np.int64)
self.assertEqual(df1['D'].dtype, np.bool_)
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
for kind in JOIN_TYPES:
joined = df1.join(df2, how=kind)
expected = _join_by_hand(df1, df2, how=kind)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=kind)
expected = _join_by_hand(df2, df1, how=kind)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sortlevel(0)
df2 = df2.sortlevel(0)
joined = df1.join(df2, how='outer')
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
df1 = df1.sortlevel(1)
df2 = df2.sortlevel(1)
joined = df1.join(df2, how='outer').sortlevel(0)
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
self.assertTrue(joined.index.is_monotonic)
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.ix[:, expected.columns])
def test_join_hierarchical_mixed(self):
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
result = merge(new_df, other_df, left_index=True, right_index=True)
self.assertTrue(('b', 'mean') in result)
self.assertTrue('b' in result)
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype = np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype = np.float32)
joined = a.join(b)
self.assertEqual(joined.dtypes['a'], 'float64')
self.assertEqual(joined.dtypes['b'], 'float64')
self.assertEqual(joined.dtypes['c'], 'float32')
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c })
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
self.assertEqual(rs.dtypes['a'], 'int64')
self.assertEqual(rs.dtypes['b'], 'float64')
self.assertEqual(rs.dtypes['c'], 'float32')
self.assertEqual(rs.dtypes['md'], 'float32')
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
result['a'] = result['a'].astype(np.float64)
result['b'] = result['b'].astype(np.float64)
assert_frame_equal(result, expected.ix[:, result.columns])
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
self.assertRaises(ValueError, merge, self.left, self.right,
left_index=True)
self.assertRaises(ValueError, merge, self.left, self.right,
right_index=True)
self.assertRaises(ValueError, merge, self.left, self.left,
left_on='key', on='key')
self.assertRaises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
self.assertEqual(len(merged), exp_len)
self.assertIn('v1_x', merged)
self.assertIn('v1_y', merged)
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
assert_almost_equal(merged['lkey'],
['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan])
assert_almost_equal(merged['rkey'],
['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'])
assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan])
assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7])
def test_merge_copy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=True)
merged['a'] = 6
self.assertTrue((left['a'] == 0).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'bar').all())
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=False)
merged['a'] = 6
self.assertTrue((left['a'] == 6).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'peekaboo').all())
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
self.assert_numpy_array_equal(joined.index, lrange(4))
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected, check_dtype=False)
self.assertTrue(joined._data.is_consolidated())
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
self.assertTrue(merged['key'].notnull().all())
self.assertTrue(merged2['key'].notnull().all())
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
self.assert_numpy_array_equal(merged['key_0'],
np.array([1, 1, 1, 1, 2, 2, 3, 4, 5]))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3])
merged = merge(left, right, left_index=True, right_on=key, how='outer')
self.assert_numpy_array_equal(merged['key_0'], key)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({'key': [1], 'value': [2]})
right = DataFrame({'key': []})
result = merge(left, right, on='key', how='left')
assert_frame_equal(result, left)
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
def test_merge_nosort(self):
# #2098, anything to do?
from datetime import datetime
d = {"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
datetime(
2010, 2, 3), datetime(2012, 1, 12),
datetime(
2011, 2, 4), datetime(2012, 4, 3),
datetime(
2012, 3, 4), datetime(2008, 5, 1),
datetime(2010, 2, 3), datetime(2012, 2, 3)]}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3,
"var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
self.assertTrue((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
df1 = DataFrame({"i1" : [0, 1], "i2" : [0, 1]})
df2 = DataFrame({"i1" : [0], "i3" : [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan}, 'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}}).set_index(None).reset_index()[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1" : [0, 1], "i2" : [0.5, 1.5]})
df2 = DataFrame({"i1" : [0], "i3" : [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5}, 'i3': {0: 0.69999999999999996,
1: nan}})[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
tm.assert_isinstance(result, NotADataFrame)
def test_append_dtype_coerce(self):
# GH 4993
# appending with datetime will incorrectly convert datetime64
import datetime as dt
from pandas import NaT
df1 = DataFrame(index=[1,2], data=[dt.datetime(2013,1,1,0,0),
dt.datetime(2013,1,2,0,0)],
columns=['start_time'])
df2 = DataFrame(index=[4,5], data=[[dt.datetime(2013,1,3,0,0),
dt.datetime(2013,1,3,6,10)],
[dt.datetime(2013,1,4,0,0),
dt.datetime(2013,1,4,7,10)]],
columns=['start_time','end_time'])
expected = concat([
Series([NaT,NaT,dt.datetime(2013,1,3,6,10),dt.datetime(2013,1,4,7,10)],name='end_time'),
Series([dt.datetime(2013,1,1,0,0),dt.datetime(2013,1,2,0,0),dt.datetime(2013,1,3,0,0),dt.datetime(2013,1,4,0,0)],name='start_time'),
],axis=1)
result = df1.append(df2,ignore_index=True)
assert_frame_equal(result, expected)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56) ],
't': [ dt.timedelta(0, 22500),
dt.timedelta(0, 22500) ]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td,td],index=["A","B"]))
rhs = DataFrame(Series([td],index=["A"]))
from pandas import NaT
result = lhs.join(rhs,rsuffix='r', how="left")
expected = DataFrame({ '0' : Series([td,td],index=list('AB')), '0r' : Series([td,NaT],index=list('AB')) })
assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
# #2649
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
self.assertRaises(ValueError, merge, df, df2)
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
assert_frame_equal(result, expected, check_names=False) # TODO check_names on merge?
class TestMergeMulti(tm.TestCase):
def setUp(self):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
columns=['j_one', 'j_two', 'j_three'])
# a little relevant example with NAs
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
self.data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.ix[:, joined.columns])
left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
right = expected.ix[:, joined.columns].sort(['key1', 'key2'],
kind='mergesort')
assert_frame_equal(left, right)
def test_left_join_multi_index(self):
icols = ['1st', '2nd', '3rd']
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord('a')
return f(df['1st']) + f(df['3rd'])* 1e2 + df['2nd'].fillna(0) * 1e4
def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
self.assertTrue(len(left) < len(res) + 1)
self.assertFalse(res['4th'].isnull().any())
self.assertFalse(res['5th'].isnull().any())
tm.assert_series_equal(res['4th'], - res['5th'])
tm.assert_series_equal(res['4th'], bind_cols(res.iloc[:, :-2]))
if sort:
tm.assert_frame_equal(res,
res.sort(icols, kind='mergesort'))
out = merge(left, right.reset_index(), on=icols,
sort=sort, how='left')
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)),
columns=['1st', '3rd'])
left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left['4th'] = bind_cols(left)
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
# inject some nulls
left.loc[1::23, '1st'] = np.nan
left.loc[2::37, '2nd'] = np.nan
left.loc[3::43, '3rd'] = np.nan
left['4th'] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
def test_merge_right_vs_left(self):
# compare left vs right merge with multikey
for sort in [False, True]:
merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
right_index=True, how='left', sort=sort)
merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
left_index=True, how='right', sort=sort)
merged2 = merged2.ix[:, merged1.columns]
assert_frame_equal(merged1, merged2)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merged = merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.sort(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# test join with multi dtypes blocks
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3' : np.array([0, 1, 2]*8, dtype=np.float32),
'v': np.array(np.arange(24),dtype=np.int32) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.sort(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame([
['X', 'Y', 'C', 'a'],
['W', 'Y', 'C', 'e'],
['V', 'Q', 'A', 'h'],
['V', 'R', 'D', 'i'],
['X', 'Y', 'D', 'b'],
['X', 'Y', 'A', 'c'],
['W', 'Q', 'B', 'f'],
['W', 'R', 'C', 'g'],
['V', 'Y', 'C', 'j'],
['X', 'Y', 'B', 'd']],
columns=['cola', 'colb', 'colc', 'tag'],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])
right = DataFrame([
['W', 'R', 'C', 0],
['W', 'Q', 'B', 3],
['W', 'Q', 'B', 8],
['X', 'Y', 'A', 1],
['X', 'Y', 'A', 4],
['X', 'Y', 'B', 5],
['X', 'Y', 'C', 6],
['X', 'Y', 'C', 9],
['X', 'Q', 'C', -6],
['X', 'R', 'C', -9],
['V', 'Y', 'C', 7],
['V', 'R', 'D', 2],
['V', 'R', 'D', -1],
['V', 'Q', 'A', -3]],
columns=['col1', 'col2', 'col3', 'val'])
right.set_index(['col1', 'col2', 'col3'], inplace=True)
result = left.join(right, on=['cola', 'colb', 'colc'], how='left')
expected = DataFrame([
['X', 'Y', 'C', 'a', 6],
['X', 'Y', 'C', 'a', 9],
['W', 'Y', 'C', 'e', nan],
['V', 'Q', 'A', 'h', -3],
['V', 'R', 'D', 'i', 2],
['V', 'R', 'D', 'i', -1],
['X', 'Y', 'D', 'b', nan],
['X', 'Y', 'A', 'c', 1],
['X', 'Y', 'A', 'c', 4],
['W', 'Q', 'B', 'f', 3],
['W', 'Q', 'B', 'f', 8],
['W', 'R', 'C', 'g', 0],
['V', 'Y', 'C', 'j', 7],
['X', 'Y', 'B', 'd', 5]],
columns=['cola', 'colb', 'colc', 'tag', 'val'],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['cola', 'colb', 'colc'],
how='left', sort=True)
tm.assert_frame_equal(result,
expected.sort(['cola', 'colb', 'colc'], kind='mergesort'))
# GH7331 - maintain left frame order in left merge
right.reset_index(inplace=True)
right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
result = merge(left, right, how='left', on=left.columns[:-1].tolist())
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame([
['c', 0],
['b', 1],
['a', 2],
['b', 3]],
columns=['tag', 'val'],
index=[2, 0, 1, 3])
right = DataFrame([
['a', 'v'],
['c', 'w'],
['c', 'x'],
['d', 'y'],
['a', 'z'],
['c', 'r'],
['e', 'q'],
['c', 's']],
columns=['tag', 'char'])
right.set_index('tag', inplace=True)
result = left.join(right, on='tag', how='left')
expected = DataFrame([
['c', 0, 'w'],
['c', 0, 'x'],
['c', 0, 'r'],
['c', 0, 's'],
['b', 1, nan],
['a', 2, 'v'],
['a', 2, 'z'],
['b', 3, nan]],
columns=['tag', 'val', 'char'],
index=[2, 2, 2, 2, 0, 1, 1, 3])
tm.assert_frame_equal(result, expected)
result = left.join(right, on='tag', how='left', sort=True)
tm.assert_frame_equal(result, expected.sort('tag', kind='mergesort'))
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how='left', on='tag')
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_join_multi_dtypes(self):
# test with multi dtypes in the join index
def _test(dtype1,dtype2):
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan,dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['k1', 'k2'], sort=True)
expected.sort(['k1', 'k2'], kind='mergesort', inplace=True)
tm.assert_frame_equal(result, expected)
for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]:
for d2 in [np.int64,np.float64,np.float32,np.float16]:
_test(np.dtype(d1),np.dtype(d2))
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
data = [[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.],
[1950, "C", 4.],
[1960, "C", np.nan],
[1965, "C", 3.],
[1970, "C", 4.]]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [[1960, 'A', np.nan],
[1970, 'A', np.nan],
[1955, 'A', np.nan],
[1965, 'A', np.nan],
[1965, 'B', np.nan],
[1955, 'C', np.nan]]
other = DataFrame(other_data, columns=['year', 'panel', 'data'])
result = frame.merge(other, how='outer')
expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
def test_int64_overflow_issues(self):
from itertools import product
from collections import defaultdict
from pandas.core.groupby import _int64_overflow_possible
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
self.assertTrue(len(result) == 2000)
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
self.assertEqual(len(out), len(left))
assert_series_equal(out['left'], - out['right'])
assert_series_equal(out['left'], out.iloc[:, :-2].sum(axis=1))
out.sort(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(pd.Series.nunique).values
self.assertTrue(_int64_overflow_possible(shape))
# add duplicates to left frame
left = pd.concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7)).astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = pd.concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notnull(),
'right': out['right'].notnull(),
'inner': out['left'].notnull() & out['right'].notnull(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
self.assertTrue(mask.all() ^ mask.any() or how == 'outer')
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = DataFrame(dict(household_id = [1,2,3],
male = [0,1,0],
wealth = [196087.3,316478.7,294750]),
columns = ['household_id','male','wealth']).set_index('household_id')
portfolio = DataFrame(dict(household_id = [1,2,2,3,3,3,4],
asset_id = ["nl0000301109","nl0000289783","gb00b03mlx29","gb00b03mlx29","lu0197800237","nl0000289965",np.nan],
name = ["ABN Amro","Robeco","Royal Dutch Shell","Royal Dutch Shell","AAB Eastern Europe Equity Fund","Postbank BioTech Fonds",np.nan],
share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]),
columns = ['household_id','asset_id','name','share']).set_index(['household_id','asset_id'])
result = household.join(portfolio, how='inner')
expected = DataFrame(dict(male = [0,1,1,0,0,0],
wealth = [ 196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0 ],
name = ['ABN Amro','Robeco','Royal Dutch Shell','Royal Dutch Shell','AAB Eastern Europe Equity Fund','Postbank BioTech Fonds'],
share = [1.00,0.40,0.60,0.15,0.60,0.25],
household_id = [1,2,2,3,3,3],
asset_id = ['nl0000301109','nl0000289783','gb00b03mlx29','gb00b03mlx29','lu0197800237','nl0000289965']),
).set_index(['household_id','asset_id']).reindex(columns=['male','wealth','name','share'])
assert_frame_equal(result,expected)
assert_frame_equal(result,expected)
# equivalency
result2 = merge(household.reset_index(),portfolio.reset_index(),on=['household_id'],how='inner').set_index(['household_id','asset_id'])
assert_frame_equal(result2,expected)
result = household.join(portfolio, how='outer')
expected = concat([expected,DataFrame(dict(share = [1.00]),
index=MultiIndex.from_tuples([(4,np.nan)],
names=['household_id','asset_id']))],
axis=0).reindex(columns=expected.columns)
assert_frame_equal(result,expected)
# invalid cases
household.index.name = 'foo'
def f():
household.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
portfolio2 = portfolio.copy()
portfolio2.index.set_names(['household_id','foo'])
def f():
portfolio2.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
def test_join_multi_levels2(self):
# some more advanced merges
# GH6360
household = DataFrame(dict(household_id = [1,2,2,3,3,3,4],
asset_id = ["nl0000301109","nl0000301109","gb00b03mlx29","gb00b03mlx29","lu0197800237","nl0000289965",np.nan],
share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]),
columns = ['household_id','asset_id','share']).set_index(['household_id','asset_id'])
log_return = DataFrame(dict(
asset_id = ["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"],
t = [233, 234, 235, 180, 181],
log_return = [.09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["asset_id","t"])
expected = DataFrame(dict(
household_id = [2, 2, 2, 3, 3, 3, 3, 3],
asset_id = ["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"],
t = [233, 234, 235, 233, 234, 235, 180, 181],
share = [0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
log_return = [.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["household_id", "asset_id", "t"]).reindex(columns=['share','log_return'])
def f():
household.join(log_return, how='inner')
self.assertRaises(NotImplementedError, f)
# this is the equivalency
result = merge(household.reset_index(),log_return.reset_index(),on=['asset_id'],how='inner').set_index(['household_id','asset_id','t'])
assert_frame_equal(result,expected)
expected = DataFrame(dict(
household_id = [1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
asset_id = ["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None],
t = [None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None],
share = [1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
log_return = [None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None]
)).set_index(["household_id", "asset_id", "t"])
def f():
household.join(log_return, how='outer')
self.assertRaises(NotImplementedError, f)
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notnull().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.ix[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.ix[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
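# A small, hedged sketch of what _join_by_hand does: join the two indexes,
# reindex both frames onto the joined index, then copy the right-hand columns
# across.  For these simple unique-index frames the result should match
# DataFrame.join.  Not collected by nose (no 'test_' prefix).
def _join_by_hand_sketch():
    import pandas as pd

    a = pd.DataFrame({'x': [1., 2., 3.]}, index=['a', 'b', 'c'])
    b = pd.DataFrame({'y': [10., 20.]}, index=['b', 'd'])

    for how in JOIN_TYPES:
        by_hand = _join_by_hand(a, b, how=how)
        built_in = a.join(b, how=how)
        assert by_hand.equals(built_in), how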
class TestConcatenate(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.frame = DataFrame(tm.getSeriesData())
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
def test_append(self):
begin_index = self.frame.index[:5]
end_index = self.frame.index[5:]
begin_frame = self.frame.reindex(begin_index)
end_frame = self.frame.reindex(end_index)
appended = begin_frame.append(end_frame)
assert_almost_equal(appended['A'], self.frame['A'])
del end_frame['A']
partial_appended = begin_frame.append(end_frame)
self.assertIn('A', partial_appended)
partial_appended = end_frame.append(begin_frame)
self.assertIn('A', partial_appended)
# mixed type handling
appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
assert_frame_equal(appended, self.mixed_frame)
# what to test here
mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
# all equal except 'foo' column
assert_frame_equal(
mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
# append empty
empty = DataFrame({})
appended = self.frame.append(empty)
assert_frame_equal(self.frame, appended)
self.assertIsNot(appended, self.frame)
appended = empty.append(self.frame)
assert_frame_equal(self.frame, appended)
self.assertIsNot(appended, self.frame)
# overlap
self.assertRaises(ValueError, self.frame.append, self.frame,
verify_integrity=True)
# new columns
# GH 6129
df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {'x': 3, 'y': 4, 'z': 6}, 'c' : {'z' : 7}})
result = df.append(row)
assert_frame_equal(result, expected)
def test_append_length0_frame(self):
df = DataFrame(columns=['A', 'B', 'C'])
df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
df5 = df.append(df3)
expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
arr2[:] = [(3, 4., 'foo'),
(5, 6., "bar"),
(7., 8., 'baz')]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
assert_frame_equal(result, expected)
def test_append_different_columns(self):
df = DataFrame({'bools': np.random.randn(10) > 0,
'ints': np.random.randint(0, 10, 10),
'floats': np.random.randn(10),
'strings': ['foo', 'bar'] * 5})
a = df[:5].ix[:, ['bools', 'ints', 'floats']]
b = df[5:].ix[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
self.assertTrue(isnull(appended['strings'][0:4]).all())
self.assertTrue(isnull(appended['bools'][5:]).all())
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
self.frame[10:15], self.frame[15:]]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, self.frame)
chunks[-1] = chunks[-1].copy()
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)
self.assertTrue((result['foo'][15:] == 'bar').all())
self.assertTrue(result['foo'][:15].isnull().all())
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
df1 = df1.set_index(['A'])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
columns=['A', 'B', 'C'])
df2 = df2.set_index(['A'])
result = df1.append(df2)
self.assertEqual(result.index.name, 'A')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.ix[:, ['A', 'B']]
df2 = df.ix[:, ['C', 'D']]
df3 = df.ix[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_append_missing_column_proper_upcast(self):
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
df2 = DataFrame({'B': np.array([True, False, True, False],
dtype=bool)})
appended = df1.append(df2, ignore_index=True)
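        # NaNs introduced by the union of columns force upcasts: the int64
        # column becomes float64 and the bool column becomes object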
self.assertEqual(appended['A'].dtype, 'f8')
self.assertEqual(appended['B'].dtype, 'O')
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0,10,size=4).reshape(4,1))
df3 = DataFrame({5 : 'foo'},index=range(4))
# these are actual copies
result = concat([df,df2,df3],axis=1,copy=True)
for b in result._data.blocks:
self.assertIsNone(b.values.base)
# these are the same
result = concat([df,df2,df3],axis=1,copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertTrue(b.values.base is df._data.blocks[0].values.base)
elif b.is_integer:
self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
# float block was consolidated
df4 = DataFrame(np.random.randn(4,1))
result = concat([df,df2,df3,df4],axis=1,copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertIsNone(b.values.base)
elif b.is_integer:
self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 0, 1, 2, 3]])
expected = DataFrame(np.r_[df.values, df2.values],
index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values],
index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values],
columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values],
columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]
level = ['three', 'two', 'one', 'zero']
result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
levels=[level],
names=['group_key'])
self.assert_numpy_array_equal(result.columns.levels[0], level)
self.assertEqual(result.columns.names[0], 'group_key')
def test_concat_dataframe_keys_bug(self):
t1 = DataFrame({'value': Series([1, 2, 3],
index=Index(['a', 'b', 'c'], name='id'))})
t2 = DataFrame({'value': Series([7, 8],
index=Index(['a', 'b'], name='id'))})
# it works
result = concat([t1, t2], axis=1, keys=['t1', 't2'])
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
'baz': DataFrame(np.random.randn(4, 3)),
'qux': DataFrame(np.random.randn(4, 3))}
sorted_keys = sorted(frames)
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
axis=1)
tm.assert_frame_equal(result, expected)
keys = ['baz', 'foo', 'bar']
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
frame1 = DataFrame({"test1": ["a", "b", "c"],
"test2": [1, 2, 3],
"test3": [4.5, 3.2, 1.2]})
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True)
nan = np.nan
expected = DataFrame([[nan, nan, nan, 4.3],
['a', 1, 4.5, 5.2],
['b', 2, 3.2, 2.2],
['c', 3, 1.2, nan]],
index=Index(["q", "x", "y", "z"]))
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
result = concat([frame, frame], keys=[0, 1], names=['iteration'])
self.assertEqual(result.index.names, ('iteration',) + index.names)
tm.assert_frame_equal(result.ix[0], frame)
tm.assert_frame_equal(result.ix[1], frame)
self.assertEqual(result.index.nlevels, 3)
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame({'dt': [datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3)],
'b': ['A', 'B', 'C'],
'c': [1, 2, 3], 'd': [4, 5, 6]})
df['dt'] = df['dt'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df = df.set_index(['dt', 'b'])
exp_idx1 = pd.DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'] * 2,
tz='US/Pacific', name='dt')
exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b')
exp_idx = pd.MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2},
index=exp_idx, columns=['c', 'd'])
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [['foo', 'baz'], ['one', 'two']]
names = ['first', 'second']
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels,
names=names)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(levels=levels + [[0]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 0, 0]],
names=names + [None])
expected.index = exp_index
assert_frame_equal(result, expected)
# no names
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels)
self.assertEqual(result.index.names, (None,) * 3)
# no levels
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second') + (None,))
self.assert_numpy_array_equal(result.index.levels[0], ['baz', 'foo'])
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
self.assertRaises(ValueError, concat, [df, df],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
self.assertRaises(ValueError, concat, [df, df2],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
a = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_a'))
b = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_b'))
result = concat([a, b], keys=['key0', 'key1'],
names=['lvl0', 'lvl1'])
exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
names = list(exp.index.names)
names[1] = 'lvl1'
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
self.assertEqual(result.index.names, exp.index.names)
def test_crossed_dtypes_weird_corner(self):
columns = ['A', 'B', 'C', 'D']
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
'B': np.array([1, 2, 3, 4], dtype='i8'),
'C': np.array([1, 2, 3, 4], dtype='f8'),
'D': np.array([1, 2, 3, 4], dtype='i8')},
columns=columns)
df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
'B': np.array([1, 2, 3, 4], dtype='f8'),
'C': np.array([1, 2, 3, 4], dtype='i8'),
'D': np.array([1, 2, 3, 4], dtype='f8')},
columns=columns)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
columns=columns)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
result = concat(
[df, df2], keys=['one', 'two'], names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second'))
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(np.random.randint(0,10,size=40).reshape(10,4),columns=['A','A','C','C'])
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:4],df)
assert_frame_equal(result.iloc[:,4:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# multi dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:6],df)
assert_frame_equal(result.iloc[:,6:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# append
result = df.iloc[0:8,:].append(df.iloc[8:])
assert_frame_equal(result, df)
result = df.iloc[0:8,:].append(df.iloc[8:9]).append(df.iloc[9:10])
assert_frame_equal(result, df)
expected = concat([df,df],axis=0)
result = df.append(df)
assert_frame_equal(result, expected)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
expected = concat([df,df],axis=1)
result = df.join(df,rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4,2), columns=["x", "y"])
x = DataFrame(np.random.randn(4,2), columns=["x", "y"])
y = DataFrame(np.random.randn(4,2), columns=["x", "y"])
z = DataFrame(np.random.randn(4,2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x,y,z,w],axis=1)
expected.columns=['x_x','y_x','x_y','y_y','x_x','y_x','x_y','y_y']
assert_frame_equal(dta,expected)
def test_handle_empty_objects(self):
df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
baz = df[:5].copy()
baz['foo'] = 'bar'
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
expected.loc[0:4,'foo'] = 'bar'
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(dict(A = range(10000)),index=date_range('20130101',periods=10000,freq='s'))
empty = DataFrame()
result = concat([df,empty],axis=1)
assert_frame_equal(result, df)
result = concat([empty,df],axis=1)
assert_frame_equal(result, df)
result = concat([df,empty])
assert_frame_equal(result, df)
result = concat([empty,df])
assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index=date_range('01-Jan-2013', periods=10, freq='H')
arr = np.arange(10, dtype='int64')
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1,1), index=index)
expected = DataFrame(np.repeat(arr,2).reshape(-1,2), index=index, columns = [0, 0])
result = concat([df,df], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,2).reshape(-1,2), index=index, columns = [0, 1])
result = concat([s1,s2], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = [0, 1, 2])
result = concat([s1,s2,s1], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,5).reshape(-1,5), index=index, columns = [0, 0, 1, 2, 3])
result = concat([s1,df,s2,s2,s1], axis=1)
assert_frame_equal(result, expected)
# with names
s1.name = 'foo'
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = ['foo', 0, 0])
result = concat([s1,df,s2], axis=1)
assert_frame_equal(result, expected)
s2.name = 'bar'
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = ['foo', 0, 'bar'])
result = concat([s1,df,s2], axis=1)
assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = [0, 1, 2])
result = concat([s1,df,s2], axis=1, ignore_index=True)
assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(np.tile(arr,3).reshape(-1,1), index=index.tolist() * 3, columns = [0])
result = concat([s1,df,s2])
assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr,3).reshape(-1,1), columns = [0])
result = concat([s1,df,s2], ignore_index=True)
assert_frame_equal(result, expected)
        # invalid concatenation of mixed dims
panel = tm.makePanel()
self.assertRaises(ValueError, lambda : concat([panel,s1],axis=1))
def test_panel_join(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[:2, :10, :3]
p2 = panel.ix[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.ix[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.ix[['ItemB', 'ItemC']]
# Expected index is
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.ix[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(compat.iteritems(p))
joined = panels[0].join(panels[1:], how='inner')
expected = Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='right')
def test_panel_concat_other_axes(self):
panel = tm.makePanel()
p1 = panel.ix[:, :5, :]
p2 = panel.ix[:, 5:, :]
result = concat([p1, p2], axis=1)
tm.assert_panel_equal(result, panel)
p1 = panel.ix[:, :, :2]
p2 = panel.ix[:, :, 2:]
result = concat([p1, p2], axis=2)
tm.assert_panel_equal(result, panel)
# if things are a bit misbehaved
p1 = panel.ix[:2, :, :2]
p2 = panel.ix[:, :, 2:]
p1['ItemC'] = 'baz'
result = concat([p1, p2], axis=2)
expected = panel.copy()
expected['ItemC'] = expected['ItemC'].astype('O')
expected.ix['ItemC', :, :2] = 'baz'
tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
# #2257
def make_panel():
index = 5
cols = 3
def df():
return DataFrame(np.random.randn(index, cols),
index=["I%s" % i for i in range(index)],
columns=["C%s" % i for i in range(cols)])
return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))
panel1 = make_panel()
panel2 = make_panel()
panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
for x in panel2.major_axis]),
axis=1)
panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
# it works!
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
p4d = tm.makePanel4D()
p1 = p4d.ix[:, :, :5, :]
p2 = p4d.ix[:, :, 5:, :]
result = concat([p1, p2], axis=2)
tm.assert_panel4d_equal(result, p4d)
p1 = p4d.ix[:, :, :, :2]
p2 = p4d.ix[:, :, :, 2:]
result = concat([p1, p2], axis=3)
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
p1 = p4d.ix[:, :2, :, :2]
p2 = p4d.ix[:, :, :, 2:]
p1['L5'] = 'baz'
result = concat([p1, p2], axis=3)
p2['L5'] = np.nan
expected = concat([p1, p2], axis=3)
expected = expected.ix[result.labels]
tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = 'foo'
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
self.assertEqual(result.name, ts.name)
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
labels=exp_labels)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
assert_frame_equal(result, expected)
result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
expected = DataFrame(pieces, index=['A', 'B', 'C']).T
assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name='A')
s2 = Series(randn(5), name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
self.assertTrue(np.array_equal(result.columns, lrange(2)))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=['foo'])
expected = concat([df, df], keys=['foo', 'bar'])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
self.assertRaises(ValueError, concat, [None, None])
def test_concat_datetime64_block(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=10)
df = DataFrame({'time': rng})
result = concat([df, df])
self.assertTrue((result.iloc[:10]['time'] == rng).all())
self.assertTrue((result.iloc[10:]['time'] == rng).all())
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10),unit='s')
df = DataFrame({'time': rng})
result = concat([df, df])
self.assertTrue((result.iloc[:10]['time'] == rng).all())
self.assertTrue((result.iloc[10:]['time'] == rng).all())
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat([None, df0, df0[:2], df0[:1], df0],
keys=['a', 'b', 'c', 'd', 'e'])
expected = concat([df0, df0[:2], df0[:1], df0],
keys=['b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
## to join with union
## these two are of different length!
left = concat([ts1, ts2], join='outer', axis=1)
right = concat([ts2, ts1], join='outer', axis=1)
self.assertEqual(len(left), len(right))
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = 'same name'
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns=['same name', 'same name']
assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })
df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})
expected = DataFrame([[0,6,'rrr',9,1,6],
[0,6,'rrr',10,2,6],
[0,6,'rrr',11,3,6],
[0,6,'rrr',12,4,6]])
expected.columns = ['firmNo','prc','stringvar','C','misc','prc']
result = concat([df1,df2],axis=1)
assert_frame_equal(result,expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
self.assertTrue(np.array_equal(result.columns, [0, 1]))
def test_concat_iterables(self):
from collections import deque, Iterable
        # GH 8645: check concat works with tuples, lists, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
assert_frame_equal(pd.concat((df1, df2), ignore_index=True), expected)
assert_frame_equal(pd.concat([df1, df2], ignore_index=True), expected)
assert_frame_equal(pd.concat((df for df in (df1, df2)), ignore_index=True), expected)
assert_frame_equal(pd.concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1(object):
def __len__(self):
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError:
raise IndexError
assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(Iterable):
def __iter__(self):
yield df1
yield df2
assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = mkdf(10, 2)
for obj in [1, dict(), [1, 2], (1, 2) ]:
self.assertRaises(TypeError, lambda x: concat([ df1, obj ]))
def test_concat_invalid_first_argument(self):
df1 = mkdf(10, 2)
df2 = mkdf(10, 2)
self.assertRaises(TypeError, concat, df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5,5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
assert_frame_equal(result,expected)
class TestOrderedMerge(tm.TestCase):
def setUp(self):
self.left = DataFrame({'key': ['a', 'c', 'e'],
'lvalue': [1, 2., 3]})
self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
'rvalue': [1, 2, 3., 4]})
# GH #813
def test_basic(self):
result = ordered_merge(self.left, self.right, on='key')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1, nan, 2, nan, 3, nan],
'rvalue': [nan, 1, 2, 3, nan, 4]})
assert_frame_equal(result, expected)
def test_ffill(self):
result = ordered_merge(
self.left, self.right, on='key', fill_method='ffill')
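        # fill_method='ffill' forward-fills each side's values along the
        # ordered key, e.g. 'b' and 'd' inherit the preceding lvalues 1 and 2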
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1., 1, 2, 2, 3, 3.],
'rvalue': [nan, 1, 2, 3, 3, 4]})
assert_frame_equal(result, expected)
def test_multigroup(self):
left = concat([self.left, self.left], ignore_index=True)
# right = concat([self.right, self.right], ignore_index=True)
left['group'] = ['a'] * 3 + ['b'] * 3
# right['group'] = ['a'] * 4 + ['b'] * 4
result = ordered_merge(left, self.right, on='key', left_by='group',
fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
expected['group'] = ['a'] * 6 + ['b'] * 6
assert_frame_equal(result, expected.ix[:, result.columns])
result2 = ordered_merge(self.right, left, on='key', right_by='group',
fill_method='ffill')
assert_frame_equal(result, result2.ix[:, result.columns])
result = ordered_merge(left, self.right, on='key', left_by='group')
self.assertTrue(result['group'].notnull().all())
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on='key')
tm.assert_isinstance(result, NotADataFrame)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
chrsrds/scikit-learn | sklearn/tests/test_isotonic.py | 1 | 16117 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression, _make_unique)
from sklearn.utils.validation import check_array
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
from scipy.special import expit
def test_permutation_invariance():
# check that fit is permutation invariant.
    # regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = \
ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_small_number_of_samples():
x = [0, 1, 2]
y = [1, 1.1, 1.05]
is_increasing = assert_no_warnings(check_increasing, x, y)
assert is_increasing
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert is_increasing
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert is_increasing
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert not is_increasing
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert not is_increasing
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert not is_increasing
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
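    # adjacent violators are pooled to their means: (7 + 5) / 2 = 6 and
    # (9 + 8 + 7) / 3 = 8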
assert_array_equal(y_, isotonic_regression(y))
y = np.array([10, 0, 2])
y_ = np.array([4, 4, 4])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
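    # The expected values follow from pooling: the first nine observations
    # average to 200 / 9 = 22.22222 and the last two to (23.5 + 25) / 2 = 24.25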
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_with_ties_in_differently_sized_groups():
"""
Non-regression test to handle issue 9432:
https://github.com/scikit-learn/scikit-learn/issues/9432
Compare against output in R:
> library("isotone")
> x <- c(0, 1, 1, 2, 3, 4)
> y <- c(0, 0, 1, 0, 0, 1)
> res1 <- gpava(x, y, ties="secondary")
> res1$x
`isotone` version: 1.1-0, 2015-07-24
R version: R version 3.3.2 (2016-10-31)
"""
x = np.array([0, 1, 1, 2, 3, 4])
y = np.array([0, 0, 1, 0, 0, 1])
y_true = np.array([0., 0.25, 0.25, 0.25, 0.25, 1.])
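    # the four observations at x in {1, 1, 2, 3} are pooled to their mean
    # (0 + 1 + 0 + 0) / 4 = 0.25, matching the R output quoted above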
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true)
assert_array_almost_equal(ir.fit_transform(x, y), y_true)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert all(["invalid value encountered in "
in str(warn.message) for warn in w])
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert not is_increasing
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert all(["invalid value encountered in "
in str(warn.message) for warn in w])
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert is_increasing
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check if min and max values are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
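    # 13.95 is the weighted mean of the pooled middle block:
    # (41*2 + 51*3 + 1*4 + 2*5 + 5*6) / (2 + 3 + 4 + 5 + 6) = 279 / 20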
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert max(y1) == max(y2)
assert min(y1) == min(y2)
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert sum(np.isnan(y1)) == 2
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert all_predictions_finite
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(rng.rand(n_samples),
expit(X_train)).astype('int64').astype('float64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
def test_isotonic_dtype():
y = [2, 1, 4, 3, 5]
weights = np.array([.9, .9, .9, .9, .9], dtype=np.float64)
reg = IsotonicRegression()
for dtype in (np.int32, np.int64, np.float32, np.float64):
for sample_weight in (None, weights.astype(np.float32), weights):
y_np = np.array(y, dtype=dtype)
expected_dtype = \
check_array(y_np, dtype=[np.float64, np.float32],
ensure_2d=False).dtype
res = isotonic_regression(y_np, sample_weight=sample_weight)
assert res.dtype == expected_dtype
X = np.arange(len(y)).astype(dtype)
reg.fit(X, y_np, sample_weight=sample_weight)
res = reg.predict(X)
assert res.dtype == expected_dtype
def test_make_unique_dtype():
x_list = [2, 2, 2, 3, 5]
for dtype in (np.float32, np.float64):
x = np.array(x_list, dtype=dtype)
y = x.copy()
w = np.ones_like(x)
x, y, w = _make_unique(x, y, w)
assert_array_equal(x, [2, 3, 5])
| bsd-3-clause |
AlfredNeverKog/BrainCarya | src/my/kadenze/lesson4/inception_graph.py | 1 | 2023 | import numpy as np
import tensorflow as tf
from PIL import Image
from scipy import misc
from tensorflow.python.platform import gfile
import matplotlib.pyplot as plt
from src.my.lib.utils import montage_filters, montage
model, labels = ('./inception5h/tensorflow_inception_graph.pb',
'./inception5h/imagenet_comp_graph_label_strings.txt')
import time
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def=graph_def,name='inception')
#[print(a.name) for a in g.get_operations()]
g = tf.get_default_graph()
im = np.array(misc.imresize(Image.open('./data/coffee.jpg'),(200,200))[:,:,:3])
im_4d = im[np.newaxis]
x = g.get_tensor_by_name('inception/input:0')
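# Classification: run the network's final layer ('output2') on the image and
# print the five highest-scoring ImageNet labels.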
def Classification():
last_layer = g.get_tensor_by_name('inception/output2:0')
with tf.Session() as sess:
res = np.squeeze(sess.run(last_layer,feed_dict={x: im_4d}))
txt = open(labels).readlines()
labels_val = [(key, val.strip()) for key, val in enumerate(txt)]
print([(labels_val[i], res[i]) for i in res.argsort()[::-1][:5]])
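# ConvFilters: visualize the first convolution layer's kernels as a montage,
# one panel per input (RGB) channel.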
def ConvFilters():
with tf.Session() as sess:
W = g.get_tensor_by_name('inception/conv2d0_w:0')
W_eval = sess.run(W)
Ws = np.array([montage_filters(W_eval[:, :, [i], :]) for i in range(3)])
a = np.rollaxis(Ws, 0, 3) #(n1,n2,n3) -> (n3,n2,n1)
#Filters
plt.imshow(((a / np.max(np.abs(a))) * 128 + 128).astype(np.uint), interpolation='nearest')
plt.show()
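# Convs: run the first convolution layer on the image and tile its 64 pre-ReLU
# feature maps into a single montage, timing the forward pass and the montage.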
def Convs():
with tf.Session() as sess:
tensor = g.get_tensor_by_name('inception/conv2d0_pre_relu:0')
start = time.time()
conv =sess.run(tensor,feed_dict={x:im_4d})
print('conv_time %s'%(time.time() - start))
print(conv.shape)
start = time.time()
mtage = montage(np.array([conv[0,:,:,i] for i in range(64)] ))
print('montage_time %s' % (time.time() - start))
plt.imshow(mtage)
plt.show()
Convs() | mit |
pypot/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate outer ring with an inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
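# Only two samples carry a label (one point on each circle); every other
# sample is marked -1 and treated as unlabeled by the model.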
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/inspection/tests/test_permutation_importance.py | 5 | 19332 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
get_scorer,
mean_squared_error,
r2_score,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_diabetes(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_robustness_to_high_cardinality_noisy_feature(n_jobs, seed=42):
# Permutation variable importance should not be affected by the high
# cardinality bias of traditional feature importances, especially when
# computed on a held-out test set:
rng = np.random.RandomState(seed)
n_repeats = 5
n_samples = 1000
n_classes = 5
n_informative_features = 2
n_noise_features = 1
n_features = n_informative_features + n_noise_features
# Generate a multiclass classification dataset and a set of informative
# binary features that can be used to predict some classes of y exactly
# while leaving some classes unexplained to make the problem harder.
classes = np.arange(n_classes)
y = rng.choice(classes, size=n_samples)
X = np.hstack([(y == c).reshape(-1, 1)
for c in classes[:n_informative_features]])
X = X.astype(np.float32)
# Not all target classes are explained by the binary class indicator
# features:
assert n_informative_features < n_classes
    # Add other noisy features with high cardinality (numerical) values
# that can be used to overfit the training data.
X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
assert X.shape == (n_samples, n_features)
# Split the dataset to be able to evaluate on a held-out test set. The
    # test size should be large enough for importance measurements to be
# stable:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=rng)
clf = RandomForestClassifier(n_estimators=5, random_state=rng)
clf.fit(X_train, y_train)
# Variable importances computed by impurity decrease on the tree node
# splits often use the noisy features in splits. This can give misleading
# impression that high cardinality noisy variables are the most important:
tree_importances = clf.feature_importances_
informative_tree_importances = tree_importances[:n_informative_features]
noisy_tree_importances = tree_importances[n_informative_features:]
assert informative_tree_importances.max() < noisy_tree_importances.min()
# Let's check that permutation-based feature importances do not have this
# problem.
r = permutation_importance(clf, X_test, y_test, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert r.importances.shape == (X.shape[1], n_repeats)
# Split the importances between informative and noisy features
informative_importances = r.importances_mean[:n_informative_features]
noisy_importances = r.importances_mean[n_informative_features:]
    # Because we do not have a binary variable explaining each target class,
# the RF model will have to use the random variable to make some
# (overfitting) splits (as max_depth is not set). Therefore the noisy
# variables will be non-zero but with small values oscillating around
# zero:
assert max(np.abs(noisy_importances)) > 1e-7
assert noisy_importances.max() < 0.05
# The binary features correlated with y should have a higher importance
# than the high cardinality noisy features.
# The maximum test accuracy is 2 / 5 == 0.4, each informative feature
# contributing approximately a bit more than 0.2 of accuracy.
assert informative_importances.min() > 0.15
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regression():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
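    # permuting a standardized feature amounts to replacing it with an
    # independent copy, which raises the expected squared error by
    # 2 * coef_j ** 2 * Var(x_j) = 2 * coef_j ** 2, hence the factor of two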
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
    # Then actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
# This test checks that the column shuffling logic has the same behavior
# both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
    # regression test to make sure that array and dataframe inputs yield
    # the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index onto the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
    # Now check that importances computed on the dataframe match the values
# of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
    assert X.nbytes > 1e6  # trigger joblib memmapping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
    # Auxiliary check: DummyClassifier is feature independent:
    # permuting features should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
rng = np.random.RandomState(1)
n_samples = 1000
n_features = 2
n_half_samples = n_samples // 2
x = rng.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
    # When all samples are weighted with the same weights, the ratio of
    # the two features' importances should be 1 in expectation (when using
    # mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01)
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == pytest.approx(
x1_x2_imp_ratio_w_none, 0.01)
    # When the ratio between the weights of the first half of the samples and
    # the second half of the samples approaches infinity, the ratio of
    # the two features' importances should be 2 in expectation (when using
    # mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01)
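    # Rough intuition for the expected ratio (a sketch, not a proof): for a
    # linear model y = a * x1 + b * x2 scored with mean absolute error,
    # permuting feature j changes the prediction by coef_j * (x_j - x_j'),
    # so each permutation importance scales with |coef_j| and the importance
    # ratio tracks |a| / |b|. With the weights above, the fit is dominated by
    # the first half of the samples, pushing the coefficients towards (2, 1)
    # and hence the ratio towards 2.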
def test_permutation_importance_no_weights_scoring_function():
    # Creating a scorer function that does not take sample_weight
def my_scorer(estimator, X, y):
return 1
# Creating some data and estimator for the permutation test
x = np.array([[1, 2], [3, 4]])
y = np.array([1, 2])
w = np.array([1, 1])
lr = LinearRegression()
lr.fit(x, y)
    # test that permutation_importance does not raise an error when
    # sample_weight is None
try:
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1)
except TypeError:
pytest.fail("permutation_test raised an error when using a scorer "
"function that does not accept sample_weight even though "
"sample_weight was None")
    # test that permutation_importance raises an exception when sample_weight
    # is not None
with pytest.raises(TypeError):
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1,
sample_weight=w)
@pytest.mark.parametrize(
"list_single_scorer, multi_scorer",
[
(["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]),
(
["r2", "neg_mean_squared_error"],
{
"r2": get_scorer("r2"),
"neg_mean_squared_error": get_scorer("neg_mean_squared_error"),
},
),
(
["r2", "neg_mean_squared_error"],
lambda estimator, X, y: {
"r2": r2_score(y, estimator.predict(X)),
"neg_mean_squared_error": -mean_squared_error(
y, estimator.predict(X)
),
},
),
],
)
def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer):
# Test permutation importance when scoring contains multiple scorers
# Creating some data and estimator for the permutation test
x, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(x, y)
multi_importance = permutation_importance(
lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2
)
assert set(multi_importance.keys()) == set(list_single_scorer)
for scorer in list_single_scorer:
multi_result = multi_importance[scorer]
single_result = permutation_importance(
lr, x, y, random_state=1, scoring=scorer, n_repeats=2
)
assert_allclose(multi_result.importances, single_result.importances)
| bsd-3-clause |
ishanic/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh.
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh().
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex,
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
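#
# A minimal usage sketch of the shift-invert path described above (assuming
# a small dense symmetric matrix; eigsh is defined at the end of this file):
#
# >>> import numpy as np
# >>> A = np.diag(np.arange(1.0, 14.0))      # symmetric, eigenvalues 1..13
# >>> vals, vecs = eigsh(A, k=3, sigma=0)    # shift-invert around sigma=0
# >>> np.sort(vals)                          # doctest: +SKIP
# array([ 1.,  2.,  3.])
#
# i.e. the eigenvalues closest to sigma are returned instead of the largest
# ones, which is the main feature this backport provides.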
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
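        #
        # Note: these integer modes correspond to the string modes of the
        # public eigsh() wrapper at the end of this file: 'normal' -> 3,
        # 'buckling' -> 4, 'cayley' -> 5; modes 1 and 2 are chosen
        # automatically when sigma is None.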
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
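        #
        # Note: the public eigs() wrapper at the end of this file selects
        # between these shift-invert variants via OPpart when sigma is given
        # for real A ('r' or None -> mode 3, 'i' -> mode 4); complex A
        # always uses mode 3.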
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
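# A minimal usage sketch of the svds backport above (shapes only; for a tall
# matrix the k largest singular triplets come from an eigendecomposition of
# A.H * A as described in the docstring):
#
# >>> X = np.random.RandomState(0).rand(20, 5)
# >>> u, s, vh = svds(X, k=2)
# >>> u.shape, s.shape, vh.shape             # doctest: +SKIP
# ((20, 2), (2,), (2, 5))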
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
manns/pyspread | pyspread/model/model.py | 1 | 51023 | # -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
The model contains the core data structures of pyspread and is divided
into the following layers.
- Layer 3: :class:`CodeArray`
- Layer 2: :class:`DataArray`
- Layer 1: :class:`DictGrid`
- Layer 0: :class:`KeyValueStore`
**Provides**
* :class:`DefaultCellAttributeDict`
* :class:`CellAttribute`
* :class:`CellAttributes`
* :class:`KeyValueStore`
* :class:`DictGrid`
* :class:`DataArray`
* :class:`CodeArray`
"""
from __future__ import absolute_import
from builtins import filter
from builtins import str
from builtins import zip
from builtins import range
import ast
import base64
import bz2
from collections import defaultdict
from copy import copy
import datetime
from importlib import reload
from inspect import isgenerator
import io
from itertools import product
import re
import signal
import sys
from traceback import print_exception
from typing import (
Any, Dict, Iterable, List, NamedTuple, Sequence, Tuple, Union)
import numpy
from PyQt5.QtGui import QImage, QPixmap
try:
from matplotlib.figure import Figure
except ImportError:
Figure = None
try:
from pyspread.settings import Settings
from pyspread.lib.attrdict import AttrDict
import pyspread.lib.charts as charts
from pyspread.lib.exception_handling import get_user_codeframe
from pyspread.lib.typechecks import is_stringlike
from pyspread.lib.selection import Selection
except ImportError:
from settings import Settings
from lib.attrdict import AttrDict
import lib.charts as charts
from lib.exception_handling import get_user_codeframe
from lib.typechecks import is_stringlike
from lib.selection import Selection
class DefaultCellAttributeDict(AttrDict):
"""Holds default values for all cell attributes"""
def __init__(self):
super().__init__(self)
self.borderwidth_bottom = 1
self.borderwidth_right = 1
self.bordercolor_bottom = None
self.bordercolor_right = None
self.bgcolor = 255, 255, 255 # Do not use theme
self.textfont = None
self.pointsize = 10
self.fontweight = None
self.fontstyle = None
self.textcolor = 0, 0, 0 # Do not use theme
self.underline = False
self.strikethrough = False
self.locked = False
self.angle = 0.0
self.vertical_align = "align_top"
self.justification = "justify_left"
self.frozen = False
self.merge_area = None
self.renderer = "text"
self.button_cell = False
self.panel_cell = False
class CellAttribute(NamedTuple):
"""Single cell attribute"""
selection: Selection
table: int
attr: AttrDict
class CellAttributes(list):
"""Stores cell formatting attributes in a list of CellAttribute instances
The class stores cell attributes as a list of layers.
Each layer describes attributes for one selection in one table.
Ultimately, a cell's attributes are determined by going through all
elements of an `CellAttributes` instance. A default `AttrDict` is updated
with the one in the list element if it is relevant for the respective cell.
Therefore, attributes are efficiently stored for large sets of cells.
The class provides attribute read access to single cells via
:meth:`__getitem__`.
Otherwise it behaves similar to a `list`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__add__ = None
self.__delattr__ = None
self.__delitem__ = None
self.__delslice__ = None
self.__iadd__ = None
self.__imul__ = None
self.__rmul__ = None
self.__setattr__ = None
self.__setslice__ = None
self.insert = None
self.remove = None
self.reverse = None
self.sort = None
# Cache for __getattr__ maps key to tuple of len and attr_dict
_attr_cache = AttrDict()
_table_cache = {}
def append(self, cell_attribute: CellAttribute):
"""append that clears caches
:param cell_attribute: Cell attribute to be appended
"""
if not isinstance(cell_attribute, CellAttribute):
msg = "{} not instance of CellAttribute".format(cell_attribute)
            raise Warning(msg)
# We need to clean up merge areas
selection, table, attr = cell_attribute
if "merge_area" in attr:
for i, ele in enumerate(reversed(self)):
if ele[0] == selection and ele[1] == table \
and "merge_area" in ele[2]:
try:
self.pop(-1 - i)
except IndexError:
pass
if attr["merge_area"] is not None:
super().append(cell_attribute)
else:
super().append(cell_attribute)
self._attr_cache.clear()
self._table_cache.clear()
def __getitem__(self, key: Tuple[int, int, int]) -> AttrDict:
"""Returns attribute dict for a single key
:param key: Key of cell for cell_attribute retrieval
"""
# if any(isinstance(key_ele, slice) for key_ele in key):
# raise Warning("slice in key {}".format(key))
# return
try:
cache_len, cache_dict = self._attr_cache[key]
# Use cache result only if no new attrs have been defined
if cache_len == len(self):
return cache_dict
except KeyError:
pass
# Update table cache if it is outdated (e.g. when creating a new grid)
if len(self) != self._len_table_cache():
self._update_table_cache()
row, col, tab = key
result_dict = DefaultCellAttributeDict()
try:
for selection, attr_dict in self._table_cache[tab]:
if (row, col) in selection:
result_dict.update(attr_dict)
except KeyError:
pass
        # Update cache with current length and dict
self._attr_cache[key] = (len(self), result_dict)
return result_dict
def __setitem__(self, index: int, cell_attribute: CellAttribute):
"""__setitem__ that clears caches
:param index: Index of item in self
:param cell_attribute: Cell attribute to be set
"""
if not isinstance(cell_attribute, CellAttribute):
msg = "{} not instance of CellAttribute".format(cell_attribute)
            raise Warning(msg)
super().__setitem__(index, cell_attribute)
self._attr_cache.clear()
self._table_cache.clear()
def _len_table_cache(self) -> int:
"""Returns the length of the table cache"""
length = 0
for table in self._table_cache:
length += len(self._table_cache[table])
return length
def _update_table_cache(self):
"""Clears and updates the table cache to be in sync with self"""
self._table_cache.clear()
for sel, tab, val in self:
try:
self._table_cache[tab].append((sel, val))
except KeyError:
self._table_cache[tab] = [(sel, val)]
if len(self) != self._len_table_cache():
raise Warning("Length of _table_cache does not match")
def get_merging_cell(self,
key: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Returns key of cell that merges the cell key
        Returns None if the cell key is not merged.
:param key: Key of the cell that is merged
"""
row, col, tab = key
# Is cell merged
for selection, table, attr in self:
if tab == table and "merge_area" in attr:
top, left, bottom, right = attr["merge_area"]
if top <= row <= bottom and left <= col <= right:
return top, left, tab
def for_table(self, table: int) -> list:
"""Return cell attributes for a given table
Return type should be `CellAttributes`. The list return type is
provided because PEP 563 is unavailable in Python 3.6.
Note that the table's presence in the grid is not checked.
:param table: Table for which cell attributes are returned
"""
table_cell_attributes = CellAttributes()
for selection, __table, attr in self:
if __table == table:
cell_attribute = CellAttribute(selection, __table, attr)
table_cell_attributes.append(cell_attribute)
return table_cell_attributes
# End of class CellAttributes
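# Illustrative usage sketch (not part of the original module): attributes are
# appended as CellAttribute records for a Selection and read back per cell key.
#
# >>> cell_attributes = CellAttributes()
# >>> selection = Selection([(0, 0)], [(9, 9)], [], [], [])
# >>> attr = AttrDict([("bgcolor", (255, 0, 0))])
# >>> cell_attributes.append(CellAttribute(selection, 0, attr))
# >>> cell_attributes[(2, 3, 0)].bgcolor
# (255, 0, 0)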
class KeyValueStore(dict):
"""Key-Value store in memory. Currently a dict with default value None.
This class represents layer 0 of the model.
"""
def __init__(self, default_value=None):
"""
:param default_value: Value that is provided for missing keys
"""
super().__init__()
self.default_value = default_value
def __missing__(self, value: Any) -> Any:
"""Returns the default value None"""
return self.default_value
# End of class KeyValueStore
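# Minimal sketch (illustrative only): missing keys return the configured
# default value instead of raising KeyError.
#
# >>> kv = KeyValueStore(default_value=0)
# >>> kv[(1, 2, 0)] = "'spam'"
# >>> kv[(3, 4, 0)]
# 0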
# -----------------------------------------------------------------------------
class DictGrid(KeyValueStore):
"""Core data class with all information that is stored in a `.pys` file.
Besides grid code access via standard `dict` operations, it provides
the following attributes:
* :attr:`~DictGrid.cell_attributes` - Stores cell formatting attributes
* :attr:`~DictGrid.macros` - String of all macros
This class represents layer 1 of the model.
"""
def __init__(self, shape: Tuple[int, int, int]):
"""
:param shape: Shape of the grid
"""
super().__init__()
self.shape = shape
# Instance of :class:`CellAttributes`
self.cell_attributes = CellAttributes()
# Macros as string
self.macros = u""
self.row_heights = defaultdict(float) # Keys have format (row, table)
self.col_widths = defaultdict(float) # Keys have format (col, table)
def __getitem__(self, key: Tuple[int, int, int]) -> Any:
"""
:param key: Cell key
"""
shape = self.shape
for axis, key_ele in enumerate(key):
if shape[axis] <= key_ele or key_ele < -shape[axis]:
msg = "Grid index {key} outside grid shape {shape}."
msg = msg.format(key=key, shape=shape)
raise IndexError(msg)
return super().__getitem__(key)
def __missing__(self, key):
"""Default value is None"""
return
# End of class DictGrid
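# Illustrative sketch (not part of the original module): DictGrid validates
# keys against its shape on read access.
#
# >>> dg = DictGrid((10, 10, 1))
# >>> dg[(2, 2, 0)] = "1 + 1"
# >>> dg[(2, 2, 0)]
# '1 + 1'
# >>> dg[(20, 0, 0)]
# Traceback (most recent call last):
#     ...
# IndexError: Grid index (20, 0, 0) outside grid shape (10, 10, 1).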
# -----------------------------------------------------------------------------
class DataArray:
"""DataArray provides enhanced grid read/write access.
Enhancements comprise:
* Slicing
* Multi-dimensional operations such as insertion and deletion along one
axis
This class represents layer 2 of the model.
"""
def __init__(self, shape: Tuple[int, int, int], settings: Settings):
"""
:param shape: Shape of the grid
:param settings: Pyspread settings
"""
self.dict_grid = DictGrid(shape)
self.settings = settings
def __eq__(self, other) -> bool:
if not hasattr(other, "dict_grid") or \
not hasattr(other, "cell_attributes"):
return False
return self.dict_grid == other.dict_grid and \
self.cell_attributes == other.cell_attributes
def __ne__(self, other) -> bool:
return not self.__eq__(other)
@property
def data(self) -> dict:
"""Returns `dict` of data content.
- Data is the central content interface for loading / saving data.
- It shall be used for loading and saving from and to `.pys` and other
files.
- It shall be used for loading and saving macros.
- However, it is not used for importing and exporting data because
          these operations affect only parts of the grid.
**Content of returned dict**
:param shape: Grid shape
:type shape: Tuple[int, int, int]
:param grid: Cell content
:type grid: Dict[Tuple[int, int, int], str]
:param attributes: Cell attributes
:type attributes: CellAttribute
:param row_heights: Row heights
:type row_heights: defaultdict[Tuple[int, int], float]
:param col_widths: Column widths
:type col_widths: defaultdict[Tuple[int, int], float]
:param macros: Macros
:type macros: str
"""
data = {}
data["shape"] = self.shape
data["grid"] = {}.update(self.dict_grid)
data["attributes"] = self.cell_attributes[:]
data["row_heights"] = self.row_heights
data["col_widths"] = self.col_widths
data["macros"] = self.macros
return data
@data.setter
def data(self, **kwargs):
"""Sets data from given parameters
Old values are deleted.
        If a parameter is not given, nothing is changed.
**Content of kwargs dict**
:param shape: Grid shape
:type shape: Tuple[int, int, int]
:param grid: Cell content
:type grid: Dict[Tuple[int, int, int], str]
:param attributes: Cell attributes
:type attributes: CellAttribute
:param row_heights: Row heights
:type row_heights: defaultdict[Tuple[int, int], float]
:param col_widths: Column widths
:type col_widths: defaultdict[Tuple[int, int], float]
:param macros: Macros
:type macros: str
"""
if "shape" in kwargs:
self.shape = kwargs["shape"]
if "grid" in kwargs:
self.dict_grid.clear()
self.dict_grid.update(kwargs["grid"])
if "attributes" in kwargs:
            self.cell_attributes[:] = kwargs["attributes"]
if "row_heights" in kwargs:
self.row_heights = kwargs["row_heights"]
if "col_widths" in kwargs:
self.col_widths = kwargs["col_widths"]
if "macros" in kwargs:
self.macros = kwargs["macros"]
@property
def row_heights(self) -> defaultdict:
"""row_heights interface to dict_grid"""
return self.dict_grid.row_heights
@row_heights.setter
def row_heights(self, row_heights: defaultdict):
"""row_heights interface to dict_grid"""
self.dict_grid.row_heights = row_heights
@property
def col_widths(self) -> defaultdict:
"""col_widths interface to dict_grid"""
return self.dict_grid.col_widths
@col_widths.setter
def col_widths(self, col_widths: defaultdict):
"""col_widths interface to dict_grid"""
self.dict_grid.col_widths = col_widths
@property
def cell_attributes(self) -> CellAttributes:
"""cell_attributes interface to dict_grid"""
return self.dict_grid.cell_attributes
@cell_attributes.setter
def cell_attributes(self, value: CellAttributes):
"""cell_attributes interface to dict_grid"""
# First empty cell_attributes
self.cell_attributes[:] = []
self.cell_attributes.extend(value)
@property
def macros(self) -> str:
"""macros interface to dict_grid"""
return self.dict_grid.macros
@macros.setter
def macros(self, macros: str):
"""Sets macros string"""
self.dict_grid.macros = macros
@property
def shape(self) -> Tuple[int, int, int]:
"""Returns dict_grid shape"""
return self.dict_grid.shape
@shape.setter
def shape(self, shape: Tuple[int, int, int]):
"""Deletes all cells beyond new shape and sets dict_grid shape
Returns a dict of the deleted cells' contents
:param shape: Target shape for grid
"""
# Delete each cell that is beyond new borders
old_shape = self.shape
deleted_cells = {}
if any(new_axis < old_axis
for new_axis, old_axis in zip(shape, old_shape)):
for key in list(self.dict_grid.keys()):
if any(key_ele >= new_axis
for key_ele, new_axis in zip(key, shape)):
deleted_cells[key] = self.pop(key)
# Set dict_grid shape attribute
self.dict_grid.shape = shape
self._adjust_rowcol(0, 0, 0)
self._adjust_cell_attributes(0, 0, 0)
return deleted_cells
def __iter__(self) -> Iterable:
"""Returns iterator over self.dict_grid"""
return iter(self.dict_grid)
def __contains__(self, key: Tuple[int, int, int]) -> bool:
"""True if key is contained in grid
Handles single keys only.
:param key: Key of cell to be checked
"""
if any(not isinstance(ele, int) for ele in key):
return NotImplemented
row, column, table = key
rows, columns, tables = self.shape
        return (0 <= row < rows
                and 0 <= column < columns
                and 0 <= table < tables)
# Slice support
def __getitem__(self, key: Tuple[Union[int, slice], Union[int, slice],
Union[int, slice]]
) -> Union[str, Iterable[str], Iterable[Iterable[str]],
Iterable[Iterable[Iterable[str]]]]:
"""Adds slicing access to cell code retrieval
The cells are returned as a generator of generators, of ... of unicode.
:param key: Keys of the cell code that is returned
Note
----
Classical Excel type addressing (A$1, ...) may be added here later
"""
for key_ele in key:
if isinstance(key_ele, slice):
# We have something slice-like here
return self.cell_array_generator(key)
if is_stringlike(key_ele):
# We have something string-like here
msg = "Cell string based access not implemented"
raise NotImplementedError(msg)
# key_ele should be a single cell
return self.dict_grid[key]
def __setitem__(self, key: Tuple[Union[int, slice], Union[int, slice],
Union[int, slice]], value: str):
"""Accepts index and slice keys
:param key: Cell key(s) that shall be set
:param value: Code for cell(s) to be set
"""
single_keys_per_dim = []
for axis, key_ele in enumerate(key):
if isinstance(key_ele, slice):
# We have something slice-like here
                length = self.shape[axis]
slice_range = range(*key_ele.indices(length))
single_keys_per_dim.append(slice_range)
elif is_stringlike(key_ele):
# We have something string-like here
raise NotImplementedError
else:
# key_ele is a single cell
single_keys_per_dim.append((key_ele, ))
single_keys = product(*single_keys_per_dim)
for single_key in single_keys:
if value:
# Never change merged cells
merging_cell = \
self.cell_attributes.get_merging_cell(single_key)
if merging_cell is None or merging_cell == single_key:
self.dict_grid[single_key] = value
else:
# Value is empty --> delete cell
try:
self.pop(key)
except (KeyError, TypeError):
pass
# Pickle support
def __getstate__(self) -> Dict[str, DictGrid]:
"""Returns dict_grid for pickling
Note that all persistent data is contained in the DictGrid class
"""
return {"dict_grid": self.dict_grid}
def get_row_height(self, row: int, tab: int) -> float:
"""Returns row height
:param row: Row for which height is retrieved
        :param tab: Table for which row height is retrieved
"""
try:
return self.row_heights[(row, tab)]
except KeyError:
return
def get_col_width(self, col: int, tab: int) -> float:
"""Returns column width
:param col: Column for which width is retrieved
        :param tab: Table for which column width is retrieved
"""
try:
return self.col_widths[(col, tab)]
except KeyError:
return
def keys(self) -> List[Tuple[int, int, int]]:
"""Returns keys in self.dict_grid"""
return list(self.dict_grid.keys())
def pop(self, key: Tuple[int, int, int]) -> Any:
"""dict_grid pop wrapper
:param key: Cell key
"""
return self.dict_grid.pop(key)
def get_last_filled_cell(self, table: int = None) -> Tuple[int, int, int]:
"""Returns key for the bottommost rightmost cell with content
:param table: Limit search to this table
"""
maxrow = 0
maxcol = 0
for row, col, tab in self.dict_grid:
if table is None or tab == table:
maxrow = max(row, maxrow)
maxcol = max(col, maxcol)
return maxrow, maxcol, table
def cell_array_generator(self,
key: Tuple[Union[int, slice], Union[int, slice],
Union[int, slice]]) -> Iterable[str]:
"""Generator traversing cells specified in key
Yields cells' contents.
:param key: Specifies the cell keys of the generator
"""
for i, key_ele in enumerate(key):
# Get first element of key that is a slice
if isinstance(key_ele, slice):
slc_keys = range(*key_ele.indices(self.dict_grid.shape[i]))
key_list = list(key)
key_list[i] = None
has_subslice = any(isinstance(ele, slice) for ele in key_list)
for slc_key in slc_keys:
key_list[i] = slc_key
if has_subslice:
# If there is a slice left yield generator
yield self.cell_array_generator(key_list)
else:
# No slices? Yield value
yield self[tuple(key_list)]
break
def _shift_rowcol(self, insertion_point: int, no_to_insert: int):
"""Shifts row and column sizes when a table is inserted or deleted
:param insertion_point: Table at which a new table is inserted
:param no_to_insert: Number of tables that are inserted
"""
# Shift row heights
new_row_heights = {}
del_row_heights = []
for row, tab in self.row_heights:
if tab >= insertion_point:
new_row_heights[(row, tab + no_to_insert)] = \
self.row_heights[(row, tab)]
del_row_heights.append((row, tab))
for row, tab in new_row_heights:
self.set_row_height(row, tab, new_row_heights[(row, tab)])
for row, tab in del_row_heights:
if (row, tab) not in new_row_heights:
self.set_row_height(row, tab, None)
# Shift column widths
new_col_widths = {}
del_col_widths = []
for col, tab in self.col_widths:
if tab >= insertion_point:
new_col_widths[(col, tab + no_to_insert)] = \
self.col_widths[(col, tab)]
del_col_widths.append((col, tab))
for col, tab in new_col_widths:
self.set_col_width(col, tab, new_col_widths[(col, tab)])
for col, tab in del_col_widths:
if (col, tab) not in new_col_widths:
self.set_col_width(col, tab, None)
def _adjust_rowcol(self, insertion_point: int, no_to_insert: int,
axis: int, tab: int = None):
"""Adjusts row and column sizes on insertion/deletion
:param insertion_point: Point on axis at which insertion takes place
:param no_to_insert: Number of rows or columns that are inserted
        :param axis: Row insertion if 0, column if 1, table if 2; must be in 0, 1, 2
:param tab: Table at which insertion takes place, None means all tables
"""
if axis == 2:
self._shift_rowcol(insertion_point, no_to_insert)
return
if axis not in (0, 1):
raise Warning("Axis {} not in (0, 1)".format(axis))
return
cell_sizes = self.col_widths if axis else self.row_heights
set_cell_size = self.set_col_width if axis else self.set_row_height
new_sizes = {}
del_sizes = []
for pos, table in cell_sizes:
if pos >= insertion_point and (tab is None or tab == table):
if 0 <= pos + no_to_insert < self.shape[axis]:
new_sizes[(pos + no_to_insert, table)] = \
cell_sizes[(pos, table)]
if pos < insertion_point + no_to_insert:
new_sizes[(pos, table)] = cell_sizes[(pos, table)]
del_sizes.append((pos, table))
for pos, table in new_sizes:
set_cell_size(pos, table, new_sizes[(pos, table)])
for pos, table in del_sizes:
if (pos, table) not in new_sizes:
set_cell_size(pos, table, None)
def _adjust_merge_area(
self, attrs: AttrDict, insertion_point: int, no_to_insert: int,
axis: int) -> Tuple[int, int, int, int]:
"""Returns an updated merge area
:param attrs: Cell attribute dictionary that shall be adjusted
:param insertion_point: Point on axis at which insertion takes place
:param no_to_insert: Number of rows/cols/tabs to be inserted (>=0)
:param axis: Row insertion if 0, column insertion if 1, must be in 0, 1
"""
if axis not in (0, 1):
raise Warning("Axis {} not in (0, 1)".format(axis))
return
if "merge_area" not in attrs or attrs["merge_area"] is None:
return
top, left, bottom, right = attrs["merge_area"]
selection = Selection([(top, left)], [(bottom, right)], [], [], [])
selection.insert(insertion_point, no_to_insert, axis)
__top, __left = selection.block_tl[0]
__bottom, __right = selection.block_br[0]
# Adjust merge area if it is beyond the grid shape
rows, cols, tabs = self.shape
if __top < 0 and __bottom < 0:
return
if __top >= rows and __bottom >= rows:
return
if __left < 0 and __right < 0:
return
if __left >= cols and __right >= cols:
return
if __top < 0:
__top = 0
if __top >= rows:
__top = rows - 1
if __bottom < 0:
__bottom = 0
if __bottom >= rows:
__bottom = rows - 1
if __left < 0:
__left = 0
if __left >= cols:
__left = cols - 1
if __right < 0:
__right = 0
if __right >= cols:
__right = cols - 1
return __top, __left, __bottom, __right
def _adjust_cell_attributes(
self, insertion_point: int, no_to_insert: int, axis: int,
tab: int = None, cell_attrs: AttrDict = None):
"""Adjusts cell attributes on insertion/deletion
:param insertion_point: Point on axis at which insertion takes place
:param no_to_insert: Number of rows/cols/tabs to be inserted (>=0)
        :param axis: Row insertion if 0, column if 1, table if 2; must be in 0, 1, 2
:param tab: Table at which insertion takes place, None means all tables
:param cell_attrs: If given replaces the existing cell attributes
"""
def replace_cell_attributes_table(index: int, new_table: int):
"""Replaces table in cell_attributes item
:param index: Cell attribute index for table replacement
:param new_table: New table value for cell attribute
"""
cell_attr = list(list.__getitem__(self.cell_attributes, index))
cell_attr[1] = new_table
self.cell_attributes[index] = CellAttribute(*cell_attr)
def get_ca_with_updated_ma(
attrs: AttrDict,
merge_area: Tuple[int, int, int, int]) -> AttrDict:
"""Returns cell attributes with updated merge area
:param attrs: Cell attributes to be updated
:param merge_area: New merge area (top, left, bottom, right)
"""
new_attrs = copy(attrs)
if merge_area is None:
try:
new_attrs.pop("merge_area")
except KeyError:
pass
else:
new_attrs["merge_area"] = merge_area
return new_attrs
if axis not in list(range(3)):
raise ValueError("Axis must be in [0, 1, 2]")
if tab is not None and tab < 0:
raise Warning("tab is negative")
return
if cell_attrs is None:
cell_attrs = []
if cell_attrs:
self.cell_attributes[:] = cell_attrs
elif axis < 2:
# Adjust selections on given table
ca_updates = {}
for i, (selection, table, attrs) \
in enumerate(self.cell_attributes):
selection = copy(selection)
if tab is None or tab == table:
selection.insert(insertion_point, no_to_insert, axis)
# Update merge area if present
merge_area = self._adjust_merge_area(attrs,
insertion_point,
no_to_insert, axis)
new_attrs = get_ca_with_updated_ma(attrs, merge_area)
ca_updates[i] = CellAttribute(selection, table, new_attrs)
for idx in ca_updates:
self.cell_attributes[idx] = ca_updates[idx]
elif axis == 2:
# Adjust tabs
pop_indices = []
for i, cell_attribute in enumerate(self.cell_attributes):
selection, table, value = cell_attribute
if no_to_insert < 0 and insertion_point <= table:
if insertion_point > table + no_to_insert:
# Delete later
pop_indices.append(i)
else:
replace_cell_attributes_table(i, table + no_to_insert)
elif insertion_point <= table:
# Insert
replace_cell_attributes_table(i, table + no_to_insert)
for i in pop_indices[::-1]:
self.cell_attributes.pop(i)
self.cell_attributes._attr_cache.clear()
self.cell_attributes._update_table_cache()
def insert(self, insertion_point: int, no_to_insert: int, axis: int,
tab: int = None):
"""Inserts no_to_insert rows/cols/tabs/... before insertion_point
:param insertion_point: Point on axis at which insertion takes place
:param no_to_insert: Number of rows/cols/tabs to be inserted (>=0)
        :param axis: Row/Column/Table insertion if 0/1/2, must be in 0, 1, 2
:param tab: Table at which insertion takes place, None means all tables
"""
        if not 0 <= axis < len(self.shape):
raise ValueError("Axis not in grid dimensions")
if insertion_point > self.shape[axis] or \
insertion_point < -self.shape[axis]:
raise IndexError("Insertion point not in grid")
new_keys = {}
del_keys = []
for key in list(self.dict_grid.keys()):
if key[axis] >= insertion_point and (tab is None or tab == key[2]):
new_key = list(key)
new_key[axis] += no_to_insert
if 0 <= new_key[axis] < self.shape[axis]:
new_keys[tuple(new_key)] = self(key)
del_keys.append(key)
# Now re-insert moved keys
for key in del_keys:
if key not in new_keys and self(key) is not None:
self.pop(key)
self._adjust_rowcol(insertion_point, no_to_insert, axis, tab=tab)
self._adjust_cell_attributes(insertion_point, no_to_insert, axis, tab)
for key in new_keys:
self.__setitem__(key, new_keys[key])
def delete(self, deletion_point: int, no_to_delete: int, axis: int,
tab: int = None):
"""Deletes no_to_delete rows/cols/... starting with deletion_point
:param deletion_point: Point on axis at which deletion takes place
:param no_to_delete: Number of rows/cols/tabs to be deleted (>=0)
:param axis: Row/Column/Table deletion if 0/1/2, must be in 0, 1, 2
:param tab: Table at which insertion takes place, None means all tables
"""
if not 0 <= axis < len(self.shape):
raise ValueError("Axis not in grid dimensions")
if no_to_delete < 0:
raise ValueError("Cannot delete negative number of rows/cols/...")
if deletion_point > self.shape[axis] or \
deletion_point <= -self.shape[axis]:
raise IndexError("Deletion point not in grid")
new_keys = {}
del_keys = []
# Note that the loop goes over a list that copies all dict keys
for key in list(self.dict_grid.keys()):
if tab is None or tab == key[2]:
if deletion_point <= key[axis] < deletion_point + no_to_delete:
del_keys.append(key)
elif key[axis] >= deletion_point + no_to_delete:
new_key = list(key)
new_key[axis] -= no_to_delete
new_keys[tuple(new_key)] = self(key)
del_keys.append(key)
# Now re-insert moved keys
for key in new_keys:
self.__setitem__(key, new_keys[key])
for key in del_keys:
if key not in new_keys and self(key) is not None:
self.pop(key)
self._adjust_rowcol(deletion_point, -no_to_delete, axis, tab=tab)
self._adjust_cell_attributes(deletion_point, -no_to_delete, axis, tab)
def set_row_height(self, row: int, tab: int, height: float):
"""Sets row height
:param row: Row for height setting
:param tab: Table, in which row height is set
:param height: Row height to be set
"""
try:
self.row_heights.pop((row, tab))
except KeyError:
pass
if height is not None:
self.row_heights[(row, tab)] = float(height)
def set_col_width(self, col: int, tab: int, width: float):
"""Sets column width
:param col: Column for width setting
:param tab: Table, in which column width is set
:param width: Column width to be set
"""
try:
self.col_widths.pop((col, tab))
except KeyError:
pass
if width is not None:
self.col_widths[(col, tab)] = float(width)
# Element access via call
__call__ = __getitem__
# End of class DataArray
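# Usage sketch (illustrative only; `settings` stands for an already constructed
# pyspread Settings instance): DataArray adds slicing and axis insertion on top
# of DictGrid.
#
# >>> data_array = DataArray((1000, 100, 3), settings)
# >>> data_array[0, 0, 0] = "2 + 3"
# >>> data_array.insert(0, 2, axis=0)   # insert two rows before row 0
# >>> data_array[2, 0, 0]
# '2 + 3'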
# -----------------------------------------------------------------------------
class CodeArray(DataArray):
"""CodeArray provides objects when accessing cells via `__getitem__`
Cell code can be accessed via function call
This class represents layer 3 of the model.
"""
# Cache for results from __getitem__ calls
result_cache = {}
# Cache for frozen objects
frozen_cache = {}
# Custom font storage
custom_fonts = {}
    # Safe mode: True if pyspread is operating in safe_mode.
    # In safe_mode, cells are not evaluated; their code is returned instead.
safe_mode = False
def __setitem__(self, key: Tuple[Union[int, slice], Union[int, slice],
Union[int, slice]], value: str):
"""Sets cell code and resets result cache
:param key: Cell key(s) that shall be set
:param value: Code for cell(s) to be set
"""
# Change numpy array repr function for grid cell results
numpy.set_string_function(lambda s: repr(s.tolist()))
# Prevent unchanged cells from being recalculated on cursor movement
repr_key = repr(key)
unchanged = (repr_key in self.result_cache and
value == self(key)) or \
((value is None or value == "") and
repr_key not in self.result_cache)
super().__setitem__(key, value)
if not unchanged:
# Reset result cache
self.result_cache = {}
def __getitem__(self, key: Tuple[Union[int, slice], Union[int, slice],
Union[int, slice]]) -> Any:
"""Returns _eval_cell
:param key: Cell key for result retrieval (code if in safe mode)
"""
if not any(isinstance(k, slice) for k in key):
# Button cell handling
if self.cell_attributes[key].button_cell is not False:
return
# Frozen cell handling
frozen_res = self.cell_attributes[key].frozen
if frozen_res:
if repr(key) in self.frozen_cache:
return self.frozen_cache[repr(key)]
# Frozen cache is empty.
# Maybe we have a reload without the frozen cache
result = self._eval_cell(key, self(key))
self.frozen_cache[repr(key)] = result
return result
# Normal cell handling
if repr(key) in self.result_cache:
return self.result_cache[repr(key)]
elif self(key) is not None:
result = self._eval_cell(key, self(key))
self.result_cache[repr(key)] = result
return result
def _make_nested_list(self, gen: Union[Iterable, Iterable[Iterable],
Iterable[Iterable[Iterable]]]
) -> Union[Sequence, Sequence[Sequence],
Sequence[Sequence[Sequence]]]:
"""Makes nested list from generator for creating numpy.array"""
res = []
for ele in gen:
if ele is None:
res.append(None)
elif not is_stringlike(ele) and isgenerator(ele):
# Nested generator
res.append(self._make_nested_list(ele))
else:
res.append(ele)
return res
def _get_updated_environment(self, env_dict: dict = None) -> dict:
"""Returns globals environment with 'magic' variable
:param env_dict: Maps global variable name to value, None: {'S': self}
"""
if env_dict is None:
env_dict = {'S': self}
env = globals().copy()
env.update(env_dict)
return env
def exec_then_eval(self, code: str,
_globals: dict = None, _locals: dict = None):
"""execs multuiline code and returns eval of last code line
:param code: Code to be executed / evaled
:param _globals: Globals dict for code execution and eval
:param _locals: Locals dict for code execution and eval
"""
if _globals is None:
_globals = {}
if _locals is None:
_locals = {}
block = ast.parse(code, mode='exec')
# assumes last node is an expression
last_body = block.body.pop()
last = ast.Expression(last_body.value)
exec(compile(block, '<string>', mode='exec'), _globals, _locals)
res = eval(compile(last, '<string>', mode='eval'), _globals, _locals)
if hasattr(last_body, "targets"):
for target in last_body.targets:
_globals[target.id] = res
globals().update(_globals)
return res
def _eval_cell(self, key: Tuple[int, int, int], code: str) -> Any:
"""Evaluates one cell and returns its result
:param key: Key of cell to be evaled
:param code: Code to be evaled
"""
# Flatten helper function
def nn(val: numpy.array) -> numpy.array:
"""Returns flat numpy array without None values"""
try:
return numpy.array([_f for _f in val.flat if _f is not None],
dtype="O")
except AttributeError:
# Probably no numpy array
return numpy.array([_f for _f in val if _f is not None],
dtype="O")
env_dict = {'X': key[0], 'Y': key[1], 'Z': key[2], 'bz2': bz2,
'base64': base64, 'nn': nn, 'Figure': Figure,
'R': key[0], 'C': key[1], 'T': key[2], 'S': self}
env = self._get_updated_environment(env_dict=env_dict)
if self.safe_mode:
# Safe mode is active
return code
if code is None:
# Cell is not present
return
if isgenerator(code):
# We have a generator object
return numpy.array(self._make_nested_list(code), dtype="O")
try:
signal.signal(signal.SIGALRM, self.handler)
signal.alarm(self.settings.timeout)
except AttributeError:
# No Unix system
pass
try:
result = self.exec_then_eval(code, env, {})
except AttributeError as err:
            # AttributeError includes RuntimeError
result = AttributeError(err)
except RuntimeError as err:
result = RuntimeError(err)
except Exception as err:
result = Exception(err)
finally:
try:
signal.alarm(0)
except AttributeError:
# No POSIX system
pass
# Change back cell value for evaluation from other cells
# self.dict_grid[key] = _old_code
return result
def pop(self, key: Tuple[int, int, int]):
"""pop with cache support
:param key: Cell key that shall be popped
"""
try:
self.result_cache.pop(repr(key))
except KeyError:
pass
return super().pop(key)
def reload_modules(self):
"""Reloads modules that are available in cells"""
modules = [bz2, base64, re, ast, sys, numpy, datetime]
for module in modules:
reload(module)
def clear_globals(self):
"""Clears all newly assigned globals"""
base_keys = ['cStringIO', 'KeyValueStore', 'UnRedo', 'Figure',
'reload', 'io', 'print_exception', 'get_user_codeframe',
'isgenerator', 'is_stringlike', 'bz2', 'base64',
'__package__', 're', '__doc__', 'QPixmap', 'charts',
'product', 'AttrDict', 'CellAttribute', 'CellAttributes',
'DefaultCellAttributeDict', 'ast', '__builtins__',
'__file__', 'sys', '__name__', 'QImage', 'defaultdict',
'copy', 'imap', 'ifilter', 'Selection', 'DictGrid',
'numpy', 'CodeArray', 'DataArray', 'datetime', 'signal',
'Any', 'Dict', 'Iterable', 'List', 'NamedTuple',
'Sequence', 'Tuple', 'Union']
for key in list(globals().keys()):
if key not in base_keys:
globals().pop(key)
def get_globals(self) -> dict:
"""Returns globals dict"""
return globals()
def execute_macros(self) -> Tuple[str, str]:
"""Executes all macros and returns result string and error string
Executes macros only when not in safe_mode
"""
if self.safe_mode:
return '', "Safe mode activated. Code not executed."
# We need to execute each cell so that assigned globals are updated
for key in self:
self[key]
# Windows exec does not like Windows newline
self.macros = self.macros.replace('\r\n', '\n')
# Set up environment for evaluation
globals().update(self._get_updated_environment())
# Create file-like string to capture output
code_out = io.StringIO()
code_err = io.StringIO()
err_msg = io.StringIO()
# Capture output and errors
sys.stdout = code_out
sys.stderr = code_err
try:
signal.signal(signal.SIGALRM, self.handler)
signal.alarm(self.settings.timeout)
except AttributeError:
# No POSIX system
pass
try:
exec(self.macros, globals())
try:
signal.alarm(0)
except AttributeError:
# No POSIX system
pass
except Exception:
exc_info = sys.exc_info()
user_tb = get_user_codeframe(exc_info[2]) or exc_info[2]
print_exception(exc_info[0], exc_info[1], user_tb, None, err_msg)
# Restore stdout and stderr
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
results = code_out.getvalue()
errs = code_err.getvalue() + err_msg.getvalue()
code_out.close()
code_err.close()
# Reset result cache
self.result_cache.clear()
# Reset frozen cache
self.frozen_cache.clear()
return results, errs
def _sorted_keys(self, keys: Iterable[Tuple[int, int, int]],
startkey: Tuple[int, int, int],
reverse: bool = False) -> Iterable[Tuple[int, int, int]]:
"""Generator that yields sorted keys starting with startkey
:param keys: Key sequence that is sorted
:param startkey: First key to be yielded
:param reverse: Sort direction reversed if True
"""
def tuple_key(tpl):
return tpl[::-1]
if reverse:
def tuple_cmp(tpl):
return tpl[::-1] > startkey[::-1]
else:
def tuple_cmp(tpl):
return tpl[::-1] < startkey[::-1]
searchkeys = sorted(keys, key=tuple_key, reverse=reverse)
searchpos = sum(1 for _ in filter(tuple_cmp, searchkeys))
searchkeys = searchkeys[searchpos:] + searchkeys[:searchpos]
for key in searchkeys:
yield key
def string_match(self, datastring: str, findstring: str, word: bool,
case: bool, regexp: bool) -> int:
"""Returns position of findstring in datastring or None if not found
:param datastring: String to be searched
:param findstring: Search string
:param word: Search full words only if True
:param case: Search case sensitively if True
:param regexp: Regular expression search if True
"""
if not isinstance(datastring, str): # Empty cell
return
if regexp:
match = re.search(findstring, datastring)
if match is None:
pos = -1
else:
pos = match.start()
else:
if not case:
datastring = datastring.lower()
findstring = findstring.lower()
if word:
pos = -1
matchstring = r'\b' + findstring + r'+\b'
for match in re.finditer(matchstring, datastring):
pos = match.start()
                    break  # find 1st occurrence
else:
pos = datastring.find(findstring)
if pos == -1:
return None
return pos
def findnextmatch(self, startkey: Tuple[int, int, int], find_string: str,
up: bool = False, word: bool = False, case: bool = False,
regexp: bool = False, results: bool = True
) -> Tuple[int, int, int]:
"""Returns tuple with position of the next match of find_string or None
:param startkey: Start position of search
:param find_string: String to be searched for
:param up: Search up instead of down if True
:param word: Search full words only if True
:param case: Search case sensitively if True
:param regexp: Regular expression search if True
:param results: Search includes result string if True (slower)
"""
def is_matching(key, find_string, word, case, regexp):
code = self(key)
pos = self.string_match(code, find_string, word, case, regexp)
if results:
if pos is not None:
return True
r_str = str(self[key])
pos = self.string_match(r_str, find_string, word, case, regexp)
return pos is not None
# List of keys in sgrid in search order
table = startkey[2]
keys = [key for key in self.keys() if key[2] == table]
for key in self._sorted_keys(keys, startkey, reverse=up):
try:
if is_matching(key, find_string, word, case, regexp):
return key
except Exception:
                # re errors are cryptic: sre_constants, ...
pass
def handler(self, signum: Any, frame: Any):
"""Signal handler for timeout
:param signum: Ignored
:param frame: Ignored
"""
raise RuntimeError("Timeout after {} s.".format(self.settings.timeout))
# End of class CodeArray
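# Usage sketch (illustrative only; `settings` stands for an already constructed
# pyspread Settings instance): __setitem__ stores code, __getitem__ evaluates
# it, and the call syntax still returns the raw code string.
#
# >>> code_array = CodeArray((100, 100, 3), settings)
# >>> code_array[0, 0, 0] = "6 * 7"
# >>> code_array[0, 0, 0]     # evaluated result
# 42
# >>> code_array((0, 0, 0))   # raw code
# '6 * 7'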
| gpl-3.0 |
cbertinato/pandas | pandas/tests/frame/test_timezones.py | 1 | 7854 | """
Tests for DataFrame timezone-related methods
"""
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
class TestDataFrameTimezones:
def test_frame_values_with_tz(self):
tz = "US/Central"
df = DataFrame({"A": date_range('2000', periods=4, tz=tz)})
result = df.values
expected = np.array([
[pd.Timestamp('2000-01-01', tz=tz)],
[pd.Timestamp('2000-01-02', tz=tz)],
[pd.Timestamp('2000-01-03', tz=tz)],
[pd.Timestamp('2000-01-04', tz=tz)],
])
tm.assert_numpy_array_equal(result, expected)
        # two columns, homogeneous
df = df.assign(B=df.A)
result = df.values
expected = np.concatenate([expected, expected], axis=1)
tm.assert_numpy_array_equal(result, expected)
        # three columns, heterogeneous
est = "US/Eastern"
df = df.assign(C=df.A.dt.tz_convert(est))
new = np.array([
[pd.Timestamp('2000-01-01T01:00:00', tz=est)],
[pd.Timestamp('2000-01-02T01:00:00', tz=est)],
[pd.Timestamp('2000-01-03T01:00:00', tz=est)],
[pd.Timestamp('2000-01-04T01:00:00', tz=est)],
])
expected = np.concatenate([expected, new], axis=1)
result = df.values
tm.assert_numpy_array_equal(result, expected)
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
assert result.index.tz.zone == 'UTC'
tm.assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
assert result.columns.tz.zone == 'UTC'
tm.assert_frame_equal(result, expected.T)
def test_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
assert result.index.tz.zone == 'Europe/Berlin'
tm.assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
assert result.columns.tz.zone == 'Europe/Berlin'
tm.assert_frame_equal(result, expected.T)
def test_frame_join_tzaware(self):
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=range(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
tm.assert_index_equal(result.index, ex_index)
assert result.index.tz.zone == 'US/Central'
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng)), index=rng, columns=['a'])
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_frame_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
# different timezones convert to UTC
# frame with frame
df1_central = df1.tz_convert('US/Central')
new1, new2 = df1.align(df1_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# frame with Series
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
df1[0].align(df1_central, axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern'])
def test_frame_no_datetime64_dtype(self, tz):
# after GH#7822
# these retain the timezones on dict construction
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(tz)
df = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)
assert df['B'].dtype == tz_expected
# GH#2810 (with timezones)
datetimes_naive = [ts.to_pydatetime() for ts in dr]
datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
df = DataFrame({'dr': dr,
'dr_tz': dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz': datetimes_with_tz})
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern'])
def test_frame_reset_index(self, tz):
dr = date_range('2012-06-02', periods=10, tz=tz)
df = DataFrame(np.random.randn(len(dr)), dr)
roundtripped = df.reset_index().set_index('index')
xp = df.index.tz
rs = roundtripped.index.tz
assert xp == rs
@pytest.mark.parametrize('tz', [None, 'America/New_York'])
def test_boolean_compare_transpose_tzindex_with_dst(self, tz):
# GH 19970
idx = date_range('20161101', '20161130', freq='4H', tz=tz)
df = DataFrame({'a': range(len(idx)), 'b': range(len(idx))},
index=idx)
result = df.T == df.T
expected = DataFrame(True, index=list('ab'), columns=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('copy', [True, False])
@pytest.mark.parametrize('method, tz', [
['tz_localize', None],
['tz_convert', 'Europe/Berlin']
])
def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz):
# GH 6326
result = DataFrame(np.arange(0, 5),
index=date_range('20131027', periods=5,
freq='1H', tz=tz))
getattr(result, method)('UTC', copy=copy)
expected = DataFrame(np.arange(0, 5),
index=date_range('20131027', periods=5,
freq='1H', tz=tz))
tm.assert_frame_equal(result, expected)
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture):
# GH 25843
tz = tz_aware_fixture
result = DataFrame({'d': [pd.Timestamp('2019', tz=tz)]},
dtype='datetime64[ns]')
expected = DataFrame({'d': [pd.Timestamp('2019')]})
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
cassinius/right-to-forget-data | src/multi_class/linear_svc.py | 1 | 1024 | from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from src.multi_class import input_preproc
from src.multi_class import calculate_metrics
def runClassifier(X_train, X_test, y_train, y_test):
# print y_train
predictions = OneVsRestClassifier(LinearSVC(), n_jobs=-1)\
.fit(X_train, y_train).predict(X_test)
# Metrics...
precision, recall, f1, accuracy = calculate_metrics.calculateMetrics(predictions, y_test)
print( "intermediary results (precision | recall | F1 Score | Accuracy):" )
print( "%.6f %.6f %.6f %.6f" % (precision, recall, f1, accuracy) )
return precision, recall, f1, accuracy
if __name__ == "__main__":
X_train, X_test, y_train, y_test = input_preproc.readIris()
precision, recall, f1, accuracy = runClassifier(X_train, X_test, y_train, y_test)
print( "\n================================" )
print( "Precision | Recall | F1 Score | Accuracy: " )
print( "%.6f %.6f %.6f %.6f" % (precision, recall, f1, accuracy) )
| apache-2.0 |
barentsen/dave | milesplay/productionPCA.py | 1 | 16265 |
"""
Created on Tue Aug 2 09:55:43 2016
@author: Miles
"""
import numpy as np
import matplotlib.pyplot as plt
import dave.fileio.mastio as mastio
import dave.fileio.tpf as tpf
import dave.fileio.kplrfits as kplrfits
import dave.misc.noise as noise
import gapfill
def getData(epic, campaign):
"""Obtains the data for the star
For a particular star, this obtains its pixel time series data cube as well
as time points which may be bad due to thruster firings.
Inputs:
----------
epic
(int) The k2 epic number for the desired star
campaign
(int) The k2 campaign for the desired star
Returns:
------------
cube
(3d numpy array) The k2 pixel time series data for the star with
dimensions (time, number of rows per cadence, number of columns per
cadence)
badIdx
(1d boolean numpy array) Boolean array with true corresponding to a bad
time point due to thruster firings
"""
ar = mastio.K2Archive()
fits, hdr = ar.getLongTpf(epic, campaign, header=True)
cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    # get the thruster firing indices
q = fits['QUALITY'].astype(np.uint32)
rollTweakIdx = q & kplrfits.SapQuality['DefiniteRollTweak']
momDumpIdx = q & kplrfits.SapQuality['MomDump']
badIdx = rollTweakIdx | momDumpIdx
badIdx = badIdx.astype(bool)
return cube, badIdx
def takeOutNaNs(pixSeries):
"""Takes out NaNs from a pixel time series
Takes out NaNs in each pixel's time series and replaces them by filling in
the gaps using interpolation methods.
If a single pixel's time series is made up of all NaNs, replace all of the
NaNs with 0
Notes:
-----------
For more information, see gapfill.py
Inputs:
----------
pixSeries
(2d numpy array) Size: (number of pixels in cadence, number of time
points)
The 3d data cube reshaped to a 2d array to make it easy to iterate over
Returns:
------------
newPixSeries
(2d numpy array) Size: (number of pixels in cadence, number of time
points)
The input array with all of the NaNs taken out
"""
newPixSeries = []
for singlePixTimeSeries in pixSeries:
if np.all(np.isnan(singlePixTimeSeries)):
singlePixTimeSeries = np.zeros(len(singlePixTimeSeries))
else:
singlePixTimeSeries, badIdx = gapfill.gapFill(singlePixTimeSeries)
newPixSeries.append(singlePixTimeSeries)
return np.array(newPixSeries)
def hasNaNs(pixSeries):
"""Checks if any NaNs are present in the pixel time series
Iterates through a list of single pixel time series and takes the sum of
the series. If the sum is NaN, we know there is a NaN present in the series
Inputs:
----------
pixSeries
(2d numpy array) Size: (number of pixels in cadence, number of time
points) The 3d data cube reshaped to a 2d array to make it easy to
iterate over.
Returns:
----------
A boolean. True if there are NaNs, false if there are no NaNs
"""
NaNcheckArray = []
for singlePixSeries in pixSeries:
        if np.isnan(np.sum(singlePixSeries)):
NaNcheckArray.append(True)
else:
NaNcheckArray.append(False)
return np.any(NaNcheckArray)
def normalize(pixSeries):
"""Normalizes the pixel time series by the standard deviation method
Normalizes by taking the pixel time series, subtracting the mean, and
dividing by the standard deviation of the series
Inputs:
----------
pixSeries
(2d numpy array) Size: (number of pixels in cadence, number of time
points) The 3d data cube reshaped to a 2d array to make it easy to
iterate over.
Note: This series should have the NaNs taken out of it first
Returns:
------------
pixSeriesNorm
(2d numpy array) Size: (number of pixels in cadence, number of time
points)
The normalized input array
"""
assert not hasNaNs(pixSeries)
pixSeriesNorm = []
for singlePixSeries in pixSeries:
singlePixSeries = (singlePixSeries - np.mean(singlePixSeries))/ \
(np.std(singlePixSeries) + 1E-10)
pixSeriesNorm.append(singlePixSeries)
return np.array(pixSeriesNorm)
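# Quick sketch (illustrative, hypothetical values): normalize() makes each
# pixel series zero-mean; the 1E-10 term keeps constant series at ~0 instead
# of dividing by zero.
#
# x = np.array([[1., 2., 3.],
#               [5., 5., 5.]])
# normalize(x)[1]   # -> array([ 0., 0., 0.])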
def getRawLightcurve(pixSeries):
"""Gets the total light curve
Sums up the flux for each cadence to get a total light curve
Inputs:
----------
pixSeries
(2d numpy array) Size: (number of pixels in cadence, number of time
points) The 3d data cube reshaped to a 2d array to make it easy to
iterate over.
Note: This series should have the NaNs taken out of it first
Returns:
----------
totLightcurve
(1d numpy array) Length equal to number of time steps. The sum of each
cadence at each time step
"""
totLightcurve = np.sum(pixSeries, axis=0)
return totLightcurve
def performSVD(pixSeriesNorm):
"""Obtains the principal components of the pixel time series
Uses the numpy.linalg.svd function to compute the principal components of
the single pixel time series which are non-zero. The svd function returns
a tuple of values, the second index being a 2d numpy array of the principal
components with size (number of components, time steps).
Notes:
-----------
More information on the numpy svd function can be found
`here <http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html>`_
Inputs:
----------
pixSeriesNorm
(2d numpy array) Size: (number of pixels in cadence, number of time
points)
The 3d data cube reshaped to a 2d array to make it easy to iterate over
Note: The time series should be normalized first using the normalize()
function
Returns:
------------
prinComps
(2d numpy array) Size: (number of components, time steps)
Numpy array of the principal components, ranked from most influential
to least (i.e. prinComps[0] is the most dominant component in the light
curve)
"""
# don't include the single pixel time series which are all zero
idx = np.any(pixSeriesNorm, axis=1)
pixSeriesNormNonzero = pixSeriesNorm[idx, :]
prinComps = np.linalg.svd(pixSeriesNormNonzero, full_matrices=0,compute_uv = 1)[2]
return prinComps
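# Illustrative note (hypothetical shapes, not part of the original script):
# for a 10x10 aperture observed over 400 cadences, pixSeriesNorm has shape
# (100, 400) and the returned principal components have shape
# (number of non-zero pixel series, 400), ordered strongest first.
#
# prinComps = performSVD(pixSeriesNorm)   # e.g. (100, 400) in, (100, 400) out
# trend = prinComps[0]                    # most dominant systematic trend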
def curveFit(prinCompsToFit, totLightcurve):
"""Fits the desired number of principal components to the raw light curve
Uses the numpy.linalg.lstsq function to get a vector of coefficients which
is then used to generate the best fit for the desired number of principal
components by multilying the matrix of principal components by the vector
of coefficients. The resulting vector is the best fit light curve.
Notes:
-----------
More information on the numpy lstsq function can be found
`here <http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html>`_
Inputs:
----------
prinCompsToFit
(2d numpy array) Size: (number of principal components to fit, time
steps)
This is a 2d matrix of the principal components to use in generating
the best fit to the total raw light curve.
totLightcurve
(1d numpy array) Array of summed raw light curve generated by summing
each cadence, the length of which should be equal to the number of time
steps for this particular data set.
Returns:
------------
fittedCurve
(1d numpy array) This array is the best fit to the raw light curve
using the specified principal components, the length of which is equal
to the number of time steps for this particular data set.
"""
coeffs = np.linalg.lstsq(prinCompsToFit.T, totLightcurve)[0]
fittedCurve = prinCompsToFit.T.dot(coeffs.T)
return fittedCurve
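# Minimal sketch (illustrative, hypothetical shapes) of the least-squares step
# that curveFit() wraps: with k components over nt time steps, prinCompsToFit
# is (k, nt), the coefficient vector is (k,), and the fit is their product.
#
# A = prinComps[:5]                                # (5, nt)
# coeffs = np.linalg.lstsq(A.T, totLightcurve)[0]  # (5,)
# fit = A.T.dot(coeffs)                            # (nt,)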
def chooseBestSGcdppValue(sgcdppList, thresh=0.5):
"""Chooses the number of principal components to use in the final best fit
This function expects a list of values generated by using Jeff Van Cleve's
Savitzy-Golay technique. Each of these values correspond to using a
different number of principal components in the fit and correction of the
light curve. This function then chooses the optimal number of principal
components by determining at what value in this list the values become
close enough to one another to effectively treat it as a flat line. At this
point, it is futile and even detrimental to continue adding principal
components to the fit calculation.
This is achieved by implementing a threshold, i.e. if the difference
between two values becomes less than a certain threshold, the number of
principal components used in the fit corresponding to that sgcdpp value is
the optimal number of principal components to use.
Notes:
-----------
More information on the Savitzy-Golay technique can be found in
dave.misc.noise
Inputs:
----------
sgcdppList
(1d array) A list of values obtained by the Savitzy-Golay technique.
Each value corresponds to the calculation done with a fit using a
different number of principal components.
Optional Inputs:
-------------------
thresh
(float) Threshold to use in determining where in the list of sgcdpp
values it starts to become effectively flat
Returns:
------------
optimalNumPC
(int) The optimal number of principal components to use in the fit and
correction to the raw light curve
"""
# pick where it starts to flatline based on some threshold
flatLineInd = np.where(np.abs(np.diff(sgcdppList[1])) < thresh)[0][0]
optimalNumPC = sgcdppList[0][flatLineInd]
return optimalNumPC
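# Worked example (hypothetical numbers): the first adjacent difference below
# `thresh` marks where the sgcdpp curve flattens, and the matching entry of
# sgcdppList[0] is returned.
#
# sgcdppList = np.array([[1., 2., 3., 4., 5.],
#                        [80., 60., 45., 44.8, 44.7]])
# chooseBestSGcdppValue(sgcdppList, thresh=0.5)   # -> 3.0 (use 3 components)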
def generatePrinComps(dataCube, thrusterFireInds):
""" Prepares the data for SVD calculation and gets principal components
Prepares the data for SVD calculation by reshaping the data cube into a 2d
numpy array, taking out thruster firing indeces, taking out NaNs,
generating the total raw light curve, and normalizing the pixel time series
Ultimately gets the principal components by doing an SVD calculation
Inputs:
----------
dataCube
(3d numpy array) Size: (timesteps, num rows, num cols) the raw data
cube obtained from Kepler
thrusterFireInds
(1d boolean numpy array) True where there are known bad indeces due to
thruster firings
Returns:
----------
prinComps
(2d numpy array) Size: (number of components, time steps)
Numpy array of the principal components, ranked from most influential
to least (i.e. prinComps[0] is the most dominant component in the light
curve)
totLightcurve
(1d numpy array) Array of summed raw light curve generated by summing
each cadence, the length of which should be equal to the number of time
steps for this particular data set.
t
(1d numpy array) the time steps
numPixels
(int) the number of pixels per cadence
"""
nt, nr, nc = dataCube.shape
# flatten the cube
pixSeries = dataCube.reshape((nt, nc*nr)).T
# take out the points with known thruster firings
pixSeries = pixSeries[:, ~thrusterFireInds]
# replace NaNs with mean of each pixel series
pixSeries = takeOutNaNs(pixSeries)
# get the total number of pixels for each cadence
numPixels = pixSeries.shape[0]
# get the total light curve by summing all of the cadences
totLightcurve = getRawLightcurve(pixSeries)
# get the time axis for light curves
t = np.arange(len(totLightcurve))
# normalize the time series for each pixel
pixSeriesNorm = normalize(pixSeries)
# get the principal components
prinComps = performSVD(pixSeriesNorm)
return prinComps, totLightcurve, t, numPixels
def inputData():
"""Place to specify what star to get data for
    Gets the pixel time series data cube and indices of known thruster firings
for the star
Inputs:
----------
None
Returns:
----------
dataCube
(3d numpy array)
"""
epic = 206103150
campaign = 3
dataCube, thrusterFireInds = getData(epic, campaign)
return dataCube, thrusterFireInds
def chooseNumPrinComps(prinComps, totLightcurve, numPixels):
"""Chooses the optimal # of principal components to use in the final fit
Generates a list of sgcdpp values by doing an sgcdpp calculation (see
noise.computeSgCdpp_ppm)
Iterates through the list of sgcdpp values and determines where the slope
between two values starts to "flatten out" based on a certain threshold
Inputs:
----------
prinComps
(2d numpy array) Size: (number of components, time steps)
Numpy array of the principal components, ranked from most influential
to least (i.e. prinComps[0] is the most dominant component in the light
curve)
totLightcurve
(1d numpy array) Array of summed raw light curve generated by summing
each cadence, the length of which should be equal to the number of time
steps for this particular data set.
numPixels
(int) the number of pixels per cadence
Returns:
----------
optimalNumPC
(int) the optimal number of principal components to use in the best fit
and correction to the raw light curve
"""
# in my experience, the optimal number of principal components is always
# less than half of the total number of pixels. To save computation time,
# I only generate sgcdpp values for the first half of principal components
numPrinComps_sgcdppCalc = numPixels/2
# threshold for sigma clip
sigmaClipThresh = 5.
# get a list of sgcdpp values for fitting different numbers of prin comps
n = 1
sgcdppList = []
while n < numPrinComps_sgcdppCalc:
prinCompsToFit = prinComps[:n]
# fit the light curve to the amount of prin comps selected
fittedCurve = curveFit(prinCompsToFit, totLightcurve)
# correct the light curve by subtracting the prin comp fit
correctedCurve = totLightcurve - fittedCurve
# make the mean of the corrected curve = 0,
# necessary for sgcdpp calculation
correctedCurveMeanZero = correctedCurve / np.mean(correctedCurve) - 1
# get sigma clip true/false values on the corrected curve with mean 0
sigClip_tf = noise.sigmaClip(correctedCurveMeanZero, sigmaClipThresh)
# perform sigma clip
correctedMeanZeroSigClip = correctedCurveMeanZero[~sigClip_tf]
# get the sgcdpp value
sgcdppValue = noise.computeSgCdpp_ppm(correctedMeanZeroSigClip)
sgcdppList.append([n,sgcdppValue])
n += 1
sgcdppList = np.array(sgcdppList).T
# choose the number of principal components to use
optimalNumPC = chooseBestSGcdppValue(sgcdppList)
return optimalNumPC
def main():
dataCube, thrusterFireInds = inputData()
prinComps, totLightcurve, t, numPixels = generatePrinComps(dataCube,
thrusterFireInds)
optimalNumPC = chooseNumPrinComps(prinComps, totLightcurve, numPixels)
bestFit = curveFit(prinComps[:optimalNumPC], totLightcurve)
bestFitCorrectedLightcurve = totLightcurve - bestFit
    # plot the corrected light curve (raw flux minus the best-fit principal components)
plt.figure()
plt.plot(t, bestFitCorrectedLightcurve, ".", label="Corrected Lightcurve")
plt.title("sgcdpp best correction, PC = %i"%optimalNumPC)
plt.xlabel("time")
plt.ylabel("flux")
#plt.legend()
plt.show()
main() | mit |
cohortfsllc/cohort-cocl2-sandbox | buildbot/buildbot_selector.py | 2 | 19580 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import subprocess
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
python = sys.executable
bash = '/bin/bash'
echo = 'echo'
BOT_ASSIGNMENT = {
######################################################################
# Buildbots.
######################################################################
'xp-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'xp-bare-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-bare-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'precise-64-validator-opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
# Clang.
'precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'mac10.7-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# Sanitizer Pnacl toolchain buildbot.
'asan':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-64 '
' --sanitize address --skip-tests',
# PNaCl.
'oneiric_32-newlib-arm_hw-pnacl-panda-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-dbg',
'oneiric_32-newlib-arm_hw-pnacl-panda-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-opt',
'precise_64-newlib-arm_qemu-pnacl-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-dbg',
'precise_64-newlib-arm_qemu-pnacl-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-opt',
'precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'mac10.8-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'win7-64-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
# PNaCl Spec
'precise_64-newlib-arm_qemu-pnacl-buildonly-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-buildonly',
'oneiric_32-newlib-arm_hw-pnacl-panda-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-hw',
'lucid_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8632',
'lucid_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8664',
# NaCl Spec
'lucid_64-newlib-x86_32-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8632',
'lucid_64-newlib-x86_64-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8664',
# Android bots.
'precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Valgrind bots.
'precise-64-newlib-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
'precise-64-glibc-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
# Coverage.
'mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'xp-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
######################################################################
# Trybots.
######################################################################
'nacl-precise64_validator_opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
'nacl-precise64_newlib_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh newlib',
'nacl-precise64_glibc_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh glibc',
# Android trybots.
'nacl-precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'nacl-precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Coverage trybots.
'nacl-mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'nacl-precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-win32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
# Clang trybots.
'nacl-precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'nacl-mac10.6-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'nacl-precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'nacl-mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# Pnacl main trybots
'nacl-precise_64-newlib-arm_qemu-pnacl':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu arm',
'nacl-precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'nacl-precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
'nacl-arm_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-try',
'nacl-arm_hw_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-try',
'nacl-mac10.8_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-win7_64_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
# Pnacl spec2k trybots
'nacl-precise_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8632',
'nacl-precise_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8664',
'nacl-arm_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-buildonly',
'nacl-arm_hw_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-hw',
# Toolchain glibc.
'precise64-glibc': bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'mac-glibc': bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'win7-glibc': 'buildbot\\buildbot_windows-glibc-makefile.bat',
# Toolchain newlib x86.
'win7-toolchain_x86': 'buildbot\\buildbot_toolchain_win.bat',
'mac-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh mac',
'precise64-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh linux',
# Toolchain (glibc) ARM.
'win7-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'mac-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'precise64-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' --test_toolchain nacl_arm_glibc_raw'
' toolchain_build',
# BIONIC toolchain builders.
'precise64-toolchain_bionic':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build_bionic',
# Pnacl toolchain builders.
'linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-32',
'linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-64',
'mac-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
'win-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
# Pnacl toolchain testers
'linux-pnacl-x86_64-tests-x86_64':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-64',
'linux-pnacl-x86_64-tests-x86_32':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-32',
'linux-pnacl-x86_64-tests-arm':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot arm',
# MIPS toolchain buildbot.
'linux-pnacl-x86_32-tests-mips':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu mips32',
# Toolchain trybots.
'nacl-toolchain-precise64-newlib':
bash + ' buildbot/buildbot_toolchain.sh linux',
'nacl-toolchain-mac-newlib': bash + ' buildbot/buildbot_toolchain.sh mac',
'nacl-toolchain-win7-newlib': 'buildbot\\buildbot_toolchain_win.bat',
'nacl-toolchain-precise64-newlib-arm': # TODO(bradnelson): rename
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' --test_toolchain nacl_arm_glibc_raw'
' toolchain_build',
'nacl-toolchain-mac-newlib-arm': # TODO(bradnelson): rename
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-win7-newlib-arm': # TODO(bradnelson): rename
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-precise64-glibc':
bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'nacl-toolchain-mac-glibc':
bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'nacl-toolchain-win7-glibc':
'buildbot\\buildbot_windows-glibc-makefile.bat',
# Pnacl toolchain trybots.
'nacl-toolchain-linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-32',
'nacl-toolchain-linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64',
'nacl-toolchain-linux-pnacl-mips': echo + ' "TODO(mseaborn)"',
'nacl-toolchain-mac-pnacl-x86_32':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
'nacl-toolchain-win7-pnacl-x86_64':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
# Sanitizer Pnacl toolchain trybots.
'nacl-toolchain-asan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize address --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize memory not understood.
'nacl-toolchain-msan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize memory --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize thread not understood.
'nacl-toolchain-tsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize thread --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize undefined not understood.
'nacl-toolchain-ubsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize undefined --skip-tests',
}
special_for_arm = [
'win7_64',
'win7-64',
'lucid-64',
'lucid64',
'precise-64',
'precise64'
]
for platform in [
'vista', 'win7', 'win8', 'win',
'mac10.6', 'mac10.7', 'mac10.8',
'lucid', 'precise'] + special_for_arm:
if platform in special_for_arm:
arch_variants = ['arm']
else:
arch_variants = ['', '32', '64', 'arm']
for arch in arch_variants:
arch_flags = ''
real_arch = arch
arch_part = '-' + arch
# Disable GYP build for win32 bots and arm cross-builders. In this case
# "win" means Windows XP, not Vista, Windows 7, etc.
#
# Building via GYP always builds all toolchains by default, but the win32
# XP pnacl builds are pathologically slow (e.g. ~38 seconds per compile on
# the nacl-win32_glibc_opt trybot). There are other builders that test
# Windows builds via gyp, so the reduced test coverage should be slight.
if arch == 'arm' or (platform == 'win' and arch == '32'):
arch_flags += ' --no-gyp'
if platform == 'win7' and arch == '32':
arch_flags += ' --no-goma'
if arch == '':
arch_part = ''
real_arch = '32'
# Test with Breakpad tools only on basic Linux builds.
if sys.platform.startswith('linux'):
arch_flags += ' --use-breakpad-tools'
for mode in ['dbg', 'opt']:
for libc in ['newlib', 'glibc']:
# Buildbots.
for bare in ['', '-bare']:
name = platform + arch_part + bare + '-' + libc + '-' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
# Trybots
for arch_sep in ['', '-', '_']:
name = 'nacl-' + platform + arch_sep + arch + '_' + libc + '_' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
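# For reference (illustrative examples only): the loops above add buildbot
# keys such as 'precise-64-newlib-dbg' and trybot keys such as
# 'nacl-precise64_newlib_dbg', each mapped to the corresponding
# buildbot_standard.py command line.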
def EscapeJson(data):
return '"' + json.dumps(data).replace('"', r'\"') + '"'
def HasNoPerfResults(builder):
if 'pnacl-buildonly-spec' in builder:
return True
return builder in [
'mac-toolchain_arm',
'win-pnacl-x86_32',
'linux-pnacl-x86_32-tests-mips',
'precise64-toolchain_bionic',
]
def Main():
builder = os.environ.get('BUILDBOT_BUILDERNAME')
build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
build_revision = os.environ.get('BUILDBOT_GOT_REVISION',
os.environ.get('BUILDBOT_REVISION'))
slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
cmd = BOT_ASSIGNMENT.get(builder)
if not cmd:
sys.stderr.write('ERROR - unset/invalid builder name\n')
sys.exit(1)
env = os.environ.copy()
# Don't write out .pyc files because in cases in which files move around or
# the PYTHONPATH / sys.path change, old .pyc files can be mistakenly used.
# This avoids the need for admin changes on the bots in this case.
env['PYTHONDONTWRITEBYTECODE'] = '1'
# Use .boto file from home-dir instead of buildbot supplied one.
if 'AWS_CREDENTIAL_FILE' in env:
del env['AWS_CREDENTIAL_FILE']
alt_boto = os.path.expanduser('~/.boto')
if os.path.exists(alt_boto):
env['BOTO_CONFIG'] = alt_boto
cwd_drive = os.path.splitdrive(os.getcwd())[0]
env['GSUTIL'] = cwd_drive + '/b/build/third_party/gsutil/gsutil'
# When running from cygwin, we sometimes want to use a native python.
# The native python will use the depot_tools version by invoking python.bat.
if pynacl.platform.IsWindows():
env['NATIVE_PYTHON'] = 'python.bat'
else:
env['NATIVE_PYTHON'] = 'python'
if sys.platform == 'win32':
# If the temp directory is not on the same drive as the working directory,
# there can be random failures when cleaning up temp directories, so use
# a directory on the current drive. Use __file__ here instead of os.getcwd()
# because toolchain_main picks its working directories relative to __file__
filedrive, _ = os.path.splitdrive(__file__)
tempdrive, _ = os.path.splitdrive(env['TEMP'])
if tempdrive != filedrive:
env['TEMP'] = filedrive + '\\temp'
if not os.path.exists(env['TEMP']):
os.mkdir(env['TEMP'])
# Ensure a temp directory exists.
if 'TEMP' not in env:
env['TEMP'] = tempfile.gettempdir()
# Isolate build's temp directory to a particular location so we can clobber
# the whole thing predictably and so we have a record of who's leaking
# temporary files.
nacl_tmp = os.path.join(env['TEMP'], 'nacl_tmp')
if not os.path.exists(nacl_tmp):
os.mkdir(nacl_tmp)
env['TEMP'] = os.path.join(nacl_tmp, builder)
if not os.path.exists(env['TEMP']):
os.mkdir(env['TEMP'])
# Set all temp directory variants to the same thing.
env['TMPDIR'] = env['TEMP']
env['TMP'] = env['TEMP']
print 'TEMP=%s' % env['TEMP']
sys.stdout.flush()
# Run through runtest.py to get upload of perf data.
build_properties = {
'buildername': builder,
'mastername': 'client.nacl',
'buildnumber': str(build_number),
}
factory_properties = {
'perf_id': builder,
'show_perf_results': True,
'step_name': 'naclperf', # Seems unused, but is required.
'test_name': 'naclperf', # Really "Test Suite"
}
# Locate the buildbot build directory by relative path, as it's absolute
# location varies by platform and configuration.
buildbot_build_dir = os.path.join(* [os.pardir] * 4)
runtest = os.path.join(buildbot_build_dir, 'scripts', 'slave', 'runtest.py')
# For builds with an actual build number, require that the script is present
# (i.e. that we're run from an actual buildbot).
if build_number is not None and not os.path.exists(runtest):
raise Exception('runtest.py script not found at: %s\n' % runtest)
cmd_exe = cmd.split(' ')[0]
cmd_exe_ext = os.path.splitext(cmd_exe)[1]
# Do not wrap these types of builds with runtest.py:
# - tryjobs
# - commands beginning with 'echo '
# - batch files
# - debug builders
# - builds with no perf tests
if not (slave_type == 'Trybot' or
cmd_exe == echo or
cmd_exe_ext == '.bat' or
'-dbg' in builder or
HasNoPerfResults(builder)):
# Perf dashboards are now generated by output scraping that occurs in the
# script runtest.py, which lives in the buildbot repository.
# Non-trybot builds should be run through runtest, allowing it to upload
# perf data if relevant.
cmd = ' '.join([
python, runtest,
'--revision=' + build_revision,
'--build-dir=src/out',
'--results-url=https://chromeperf.appspot.com',
'--annotate=graphing',
'--no-xvfb', # We provide our own xvfb invocation.
'--factory-properties', EscapeJson(factory_properties),
'--build-properties', EscapeJson(build_properties),
cmd,
])
print "%s runs: %s\n" % (builder, cmd)
sys.stdout.flush()
retcode = subprocess.call(cmd, env=env, shell=True)
sys.exit(retcode)
if __name__ == '__main__':
Main()
| bsd-3-clause |
Srisai85/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
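# Illustrative check (not part of the original example): the Dirichlet process
# model typically assigns data to only a few of the five available components,
# which we can see from the labels it actually uses.
print('Components used by the DPGMM: %s' % np.unique(dpgmm.predict(X)))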
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
jgomezc1/FEM-Notes | scripts/1DELEMENT.py | 1 | 1170 | # -*- coding: utf-8 -*-
"""
--------1D Lagrange interpolation problem---------------
"""
from __future__ import division
import lagrange as la
import matplotlib.pyplot as plt
import numpy as np
from sympy import *
from sympy import init_printing
init_printing()
#
fx = lambda x: x**3 + 4*x**2 - 10.0;
#
# Assign symbols
#
x= symbols('x')
#
npts = 200
xx = np.linspace(-1, 1, npts)
fd = np.array([fx(-1.0), fx(1.0), fx(0.0)])
#
# Obtain Lagrange polynomials
#
pol = []
pol.append(simplify(la.LagrangPoly(x, 2, 0, [-1,1,0])))
pol.append(simplify(la.LagrangPoly(x, 2, 1, [-1,1,0])))
pol.append(simplify(la.LagrangPoly(x, 2, 2, [-1,1,0])))
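#
# Illustrative sanity check (not part of the original script): the Lagrange
# basis functions form a partition of unity, so their sum simplifies to 1.
#
print "Sum of basis functions:", simplify(pol[0] + pol[1] + pol[2])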
#
#%% Plot P0
#
print "First polynomial", pol[0]
print "Second polynomial", pol[1]
print "Third polynomial", pol[2]
# Create a lambda function for the polynomials
pol_num = lambdify((x), pol, "numpy")
# Plotting the base functions
plt.subplot(1,2,1)
for k in range(3):
yy = pol_num(xx)[k]
plt.plot(xx, yy)
# Plotting the interpolated function
yy = sum(fd[k]*pol_num(xx)[k] for k in range(len(pol)))
plt.subplot(1,2,2)
plt.plot([-1, 1, 0], fd, 'ko')
plt.plot(xx, yy)
plt.show()
| mit |
aayushidwivedi01/spark-tk | python/sparktk/frame/constructors/import_pandas.py | 12 | 8940 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.tkcontext import TkContext
from sparktk import dtypes
import datetime
import logging
logger = logging.getLogger('sparktk')
def import_pandas(pandas_frame, schema=None, row_index=True, validate_schema=False, tc=TkContext.implicit):
"""
Imports data from the specified pandas data frame.
Parameters
----------
:param pandas_frame: (pandas.DataFrame) pandas dataframe object
:param schema: (Optional(list[tuples(string, type)])) Schema description of the fields for a given line. It is a
list of tuples which describe each field, (field name, field type), where the field name is a
string, and file is a supported type. If no schema is provided, the schema will be inferred based
on the column names and types from the pandas_frame.
:param row_index: (Optional(bool)) Indicates if the row_index is present in the pandas dataframe and needs to be
ignored when looking at the data values. Default value is True.
:param validate_schema: (Optional(bool)) If true, validates the data against the schema and attempts to cast the
data to the specified type, if it does not match the schema. Defaults to False.
:return: (Frame) spark-tk frame that contains data from the pandas_frame
Examples
--------
Create a pandas data frame:
>>> import pandas
>>> ratings_data = [[0, "invalid"], [1, "Very Poor"], [2, "Poor"], [3, "Average"], [4, "Good"], [5, "Very Good"]]
>>> df = pandas.DataFrame(ratings_data, columns=['rating_id', 'rating_text'])
>>> df
rating_id rating_text
0 0 invalid
1 1 Very Poor
2 2 Poor
3 3 Average
4 4 Good
5 5 Very Good
>>> df.columns.tolist()
['rating_id', 'rating_text']
>>> df.dtypes
rating_id int64
rating_text object
dtype: object
When using import_pandas by just passing the pandas data frame, it will use the column names and types from the
pandas data frame to generate the schema.
>>> frame = tc.frame.import_pandas(df)
>>> frame.inspect()
[#] rating_id rating_text
===========================
[0] 0 invalid
[1] 1 Very Poor
[2] 2 Poor
[3] 3 Average
[4] 4 Good
[5] 5 Very Good
>>> frame.schema
[('rating_id', <type 'long'>), ('rating_text', <type 'str'>)]
Alternatively, you can specify a schema when importing the pandas data frame. There is also the option to validate
the data against the schema. If this option is enabled, we will attempt to cast the data to the column's data type,
if it does not match the schema.
For example, here we will specify a schema where the rating_id column will instead be called 'rating_float' and it's
data type will be a float. We will also enable the validate_schema option so that the rating_id value will get
casted to a float:
>>> schema = [("rating_float", float), ("rating_str", unicode)]
>>> frame = tc.frame.import_pandas(df, schema, validate_schema=True)
>>> frame.inspect()
[#] rating_float rating_str
=============================
[0] 0.0 invalid
[1] 1.0 Very Poor
[2] 2.0 Poor
[3] 3.0 Average
[4] 4.0 Good
[5] 5.0 Very Good
>>> frame.schema
[('rating_float', <type 'float'>), ('rating_str', <type 'unicode'>)]
"""
try:
import pandas
except:
raise RuntimeError("pandas module not found, unable to download. Install pandas or try the take command.")
if not isinstance(pandas_frame, pandas.DataFrame):
raise TypeError("data_frame must be a pandas DataFrame.")
TkContext.validate(tc)
if schema is not None:
schema = _validate(schema)
else:
schema = _get_schema_from_df(pandas_frame)
if not row_index:
pandas_frame = pandas_frame.reset_index()
pandas_frame = pandas_frame.dropna(thresh=len(pandas_frame.columns))
field_names = [x[0] for x in schema]
if len(pandas_frame.columns) != len(field_names):
raise ValueError("Number of columns in Pandasframe {0} does not match the number of columns in the"
" schema provided {1}.".format(len(pandas_frame.columns), len(field_names)))
date_time_columns = [i for i, x in enumerate(pandas_frame.dtypes) if x == "datetime64[ns]"]
has_date_time = len(date_time_columns) > 0
    # pandas gives us the date/time in ns (nanoseconds) or as a Timestamp, and spark-tk expects it as ms, so we need to do the conversion
def pandas_datetime_to_ms(row):
for i in date_time_columns:
if isinstance(row[i], long):
row[i] = row[i] / 1000000
            elif isinstance(row[i], pandas.tslib.Timestamp) or isinstance(row[i], datetime.datetime):
dt = row[i]
# get number of seconds since epoch (%s) and multiply by 1000 for ms then get the
# microseconds to get the ms precision.
row[i] = long((long(dt.strftime("%s")) * 1000) + (dt.microsecond // 1000))
return row
pandas_rows = pandas_frame[0:len(pandas_frame.index)].values.tolist()
# if the dataframe has date/time columns, map them to ms
if (has_date_time):
pandas_rows = map(pandas_datetime_to_ms, pandas_rows)
# create frame with the pandas_rows
frame = tc.frame.create(pandas_rows, schema)
if validate_schema:
frame = tc.frame.create(frame.rdd, schema, validate_schema)
return frame
# map pandas data type strings to spark-tk schema types
_pandas_type_to_type_table = {
"datetime64[ns]": dtypes.datetime,
"object": str,
"int64": long,
"int32": int,
"float32": float,
"uint8": int,
}
def _get_schema_from_df(pandas_frame):
"""
Creates a spark-tk schema list from the specified pandas data frame.
:param pandas_frame: (pandas.DataFrame) pandas data frame to get column information
:return: (list[tuple(str, type)]) schema
"""
try:
import pandas
except:
raise RuntimeError("pandas module not found, unable to download. Install pandas or try the take command.")
if not isinstance(pandas_frame, pandas.DataFrame):
raise TypeError("pandas_frame must be a pandas DataFrame.")
column_names = pandas_frame.columns.tolist()
schema = []
for i, dtype in enumerate(pandas_frame.dtypes):
dtype_str = str(dtype)
if _pandas_type_to_type_table.has_key(dtype_str):
schema.append((column_names[i], _pandas_type_to_type_table[dtype_str]))
        else:
            logger.warn("Unsupported column type {0} for column {1}. Schema will"
                        " use a str.".format(dtype_str, column_names[i]))
            schema.append((column_names[i], str))
return schema
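# For example (illustrative), a pandas frame with an int64 column "a" and an
# object (string) column "b" maps to the following spark-tk schema:
#
#     >>> _get_schema_from_df(pandas.DataFrame({'a': [1], 'b': ['x']}))
#     [('a', <type 'long'>), ('b', <type 'str'>)]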
def _validate(schema):
"""
Validates the specified schema
:param schema: (list[tuple(str, type)]) schema to validate
:return: (list[tuple(str, type)]) validated schema
"""
if not isinstance(schema, list) or len(schema) == 0:
raise TypeError("schema must be a non-empty list of tuples")
validated_schema = []
for field in schema:
if not isinstance(field, tuple):
raise TypeError("schema must be a list of tuples (column name string, type).")
if len(field) != 2:
raise TypeError("schema tuples are expected to have 2 items, but found " + len(field))
name = field[0]
if not isinstance(name, basestring):
raise ValueError("First item in schema tuple must be a string")
try:
data_type = dtypes.dtypes.get_from_type(field[1])
except ValueError:
raise ValueError("Second item in schema tuple must be a supported type: " + str(dtypes.dtypes))
else:
validated_schema.append((name, data_type))
return validated_schema
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.22/_downloads/68dbe405ac02c372b6167f7e86b7e3e0/plot_background_filtering.py | 4 | 49916 | # -*- coding: utf-8 -*-
r"""
.. _disc-filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general, and
how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus (1987) [1]_ and
Ifeachor & Jervis (2002) [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* (2015) [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`tut-filter-resample` tutorial.
.. contents::
:local:
Problem statement
=================
Practical issues with filtering electrophysiological data are covered
in Widmann *et al.* (2012) [6]_, where they conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase signal-to-noise ratio (SNR), but if it
is not used carefully, it can distort data. Here we hope to cover some
filtering basics so users can better understand filtering trade-offs and why
MNE-Python has chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
      H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}}
                   {1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-N}} \\
           &= \frac{\sum_{k=0}^M b_k z^{-k}}{1 + \sum_{k=1}^N a_k z^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\
&= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over
1. the numerator coefficients :math:`b_k`, which get multiplied by
the previous input values :math:`x(n-k)`, and
2. the denominator coefficients :math:`a_k`, which get multiplied by
the previous output values :math:`y(n-k)`.
Note that these summations correspond to (1) a weighted `moving average`_ and
(2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in Parks & Burrus (1987) [1]_, FIR and IIR have different
trade-offs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
(2015) [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [[2]_], p. 321)...
FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always trade-offs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency trade-off, and it will
show up below.
FIR Filters
===========
First, we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try to design a low-pass filter and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG.
import numpy as np
from numpy.fft import fft, fftfreq
from scipy import signal
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.
f_p = 40.
flim = (1., sfreq / 2.) # limits for plotting
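###############################################################################
# As a quick aside, let's make the difference equation :eq:`summations`
# concrete with a minimal, self-contained sketch: evaluating the two
# summations directly in a Python loop should give the same output as
# :func:`scipy.signal.lfilter`. The coefficient values below are arbitrary
# examples, not filters recommended elsewhere in this tutorial.

b_demo = np.array([0.2, 0.3, 0.2])  # numerator coefficients b_k
a_demo = np.array([1.0, -0.5])      # denominator coefficients a_k (a_0 = 1)
x_demo = np.random.RandomState(0).randn(10)
y_demo = np.zeros_like(x_demo)
for n_demo in range(len(x_demo)):
    y_demo[n_demo] = sum(b_demo[k] * x_demo[n_demo - k]
                         for k in range(len(b_demo)) if n_demo - k >= 0)
    y_demo[n_demo] -= sum(a_demo[k] * y_demo[n_demo - k]
                          for k in range(1, len(a_demo)) if n_demo - k >= 0)
np.testing.assert_allclose(y_demo, signal.lfilter(b_demo, a_demo, x_demo))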
###############################################################################
# Take for example an ideal low-pass filter, which would give a magnitude
# response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude
# response of 0 in the stop-band (down to frequency :math:`f_s`) such that
# :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in the frequency domain is actually a sinc_
# function in the time domain, which requires an infinite number of samples
# (and thus infinite time) to represent. So although this filter has ideal
# frequency suppression, it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 s, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 s) gets us a
# slightly better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 s),
# with a resulting larger x-axis:
n = int(round(10. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`,
# :func:`scipy.signal.firwin`, and `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <numpy.fft.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
# a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, we will primarily use and explore windowed FIR design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a more
# gradual slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 s filter:
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)',
flim=flim, compensate=True)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)',
flim=flim, compensate=True)
###############################################################################
# But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# If we want to keep the filter this short (0.2 s), we should probably use
# something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 25 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# So far, we have only discussed *non-causal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current
# time point :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# before it. A *minimum-phase* filter is a causal filter that achieves the
# smallest possible delay for a given magnitude response.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming non-causal), minimum-phase filters do not require any
# compensation to achieve small delays in the pass-band. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the :func:`scipy.signal.minimum_phase` function, and note
# that the falloff is not as steep:
h_min = signal.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
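###############################################################################
# As a quick numerical sketch of the claim above (the delay of a minimum-phase
# filter is small in the pass-band, whereas a linear-phase filter delays all
# frequencies by half its length), we can compare the mean pass-band group
# delays of ``h`` and ``h_min``:

w_pass = np.linspace(0.01, f_p / nyq * np.pi, 100)  # pass-band, rad/sample
_, gd_lin = signal.group_delay((h, np.ones(1)), w=w_pass)
_, gd_min = signal.group_delay((h_min, np.ones(1)), w=w_pass)
print('Mean pass-band group delay: linear-phase %0.1f samples, '
      'minimum-phase %0.1f samples' % (gd_lin.mean(), gd_min.mean()))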
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random and line). Note that the original clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.
center = 2.
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur) + 1)
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)
x += rng.randn(len(x)) / 1000.
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin', verbose=True)
x_v16 = np.convolve(h, x)
# this is the linear->zero phase, causal-to-non-causal conversion / shift
x_v16 = x_v16[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim,
compensate=True)
###############################################################################
# Filter it with a different design method ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# filter_dur = 6.6 / transition_band # sec
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin2', verbose=True)
x_v14 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim,
compensate=True)
###############################################################################
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5 # Hz
f_s = f_p + transition_band
filter_dur = 10. # sec
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
h_trans_bandwidth=transition_band,
filter_length='%ss' % filter_dur,
fir_design='firwin2', verbose=True)
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
# the effective h is one that is applied to the time-reversed version of itself
h_eff = np.convolve(h, h[::-1])
plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim,
compensate=True)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]
transition_band = 5 # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
phase='minimum', fir_design='firwin',
verbose=True)
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially in signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]
def plot_signal(x, offset):
"""Plot a signal."""
t = np.arange(len(x)) / sfreq
axes[0].plot(t, x + offset)
axes[0].set(xlabel='Time (s)', xlim=t[[0, -1]])
X = fft(x)
freqs = fftfreq(len(x), 1. / sfreq)
mask = freqs >= 0
X = X[mask]
freqs = freqs[mask]
axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16)))
axes[1].set(xlim=flim)
yscale = 30
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
yticks = -np.arange(len(yticklabels)) / yscale
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-len(yticks) / yscale, 1. / yscale],
yticks=yticks, yticklabels=yticklabels)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
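###############################################################################
# As a rough numerical companion to the figure above (illustrative only), we
# can also summarize how far each filtered version deviates from the clean
# signal; note that this lumps together residual noise, any filter ringing,
# and (for the minimum-phase case) the small uncompensated delay:

for label, x_f in [('firwin (0.16)', x_v16), ('firwin2 (0.14)', x_v14),
                   ('steep (0.13)', x_v13), ('steep (MNE-C)', x_mne_c),
                   ('minimum-phase', x_min)]:
    n_keep = min(len(x_orig), len(x_f))
    rms = np.sqrt(np.mean((x_f[:n_keep] - x_orig[:n_keep]) ** 2))
    print('%-15s RMS deviation from clean signal: %0.2e' % (label, rms))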
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few filter orders,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# non-causal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if non-causal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim,
compensate=True)
x_shallow = signal.sosfiltfilt(sos, x)
del sos
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given :ref:`above <tut_filtering_basics>` use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used whenever possible.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response. Let's also switch to using the MNE filter
# design function, which simplifies a few things and gives us some information
# about the resulting filter:
iir_params = dict(order=8, ftype='butter')
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim,
compensate=True)
x_steep = signal.sosfiltfilt(filt['sos'], x)
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
iir_params.update(ftype='cheby1',
rp=1., # dB of acceptable pass-band ripple
)
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain,
'Chebychev-1 order=8, ripple=1 dB', flim=flim, compensate=True)
###############################################################################
# If we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
iir_params['rp'] = 6.
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
method='iir', iir_params=iir_params,
verbose=True)
plot_filter(filt, sfreq, freq, gain,
'Chebychev-1 order=8, ripple=6 dB', flim=flim,
compensate=True)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are non-causal (zero-phase), can
# make activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen (2011) [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet (2012) [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger (2012) [6]_ that
# the problematic low-pass filters from VanRullen (2011) [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* (2012) [4]_ to:
#
# "... generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* (2015) [7]_ also came to suggest a
# 0.1 Hz highpass. More evidence followed in Tanner *et al.* (2015) [8]_ of
# such distortions. Using data from language ERP studies of semantic and
# syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early when compared
# to the unfiltered data. From this, the authors suggested the optimal
# high-pass value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* (2015) [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass
# and high-pass filters... No visible distortion to the original
# waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
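# As a quick check of the note above (plain NumPy, nothing MNE-specific):
# the windowed, shifted cosine has a non-zero DC component and spreads
# energy well beyond its nominal 1.25 Hz frequency, so the filters below
# really do act on content in their transition and stop bands.
x_spectrum = np.abs(np.fft.rfft(x)) / len(x)
freqs_spectrum = np.fft.rfftfreq(len(x), 1. / sfreq)
frac_above_5 = (np.sum(x_spectrum[freqs_spectrum > 5.] ** 2) /
                np.sum(x_spectrum ** 2))
print('DC component: %0.3f, fraction of energy above 5 Hz: %0.4f'
      % (x_spectrum[0], frac_above_5))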
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
ax.plot(t, x, color='0.5')
ax.plot(t, x_f, color='k', linestyle='--')
ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm, Kappenman & Luck (2010) [12]_ found
# that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving the :ref:`sample-dataset` dataset,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* (2015) [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* (2016) [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* (2016) [10]_ rebutted that baseline correction can correct
# for problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
all_axes = plt.subplots(3, 2)[1]
for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
for ci, ax in enumerate(axes):
if ci == 0:
iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
output='sos')
x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0)
else:
x_hp -= x_hp[t < 0].mean()
ax.plot(t, x, color='0.5')
ax.plot(t, x_hp, color='k', linestyle='--')
if ri == 0:
ax.set(title=('No ' if ci == 0 else '') +
'Baseline Correction')
ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
horizontalalignment='right')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
    plt.suptitle('High-pass filtered signals with and without baseline correction')
plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* (2016) [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` with some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multi-electrode recordings
# the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`. In Widmann *et al.* (2015) [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
# distance from the passband edge to the critical frequency.”
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 50.0 | 12.5 | 12.5 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
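# A rough sketch of that rule in plain Python (the real computation lives in
# ``mne.filter``, and the exact clamping at the band edges may differ; the
# numbers below reproduce the table for a 100 Hz sample rate):

for freq_demo in (0.01, 0.1, 1., 2., 4., 8., 10., 20., 40., 50.):
    # high-pass side: 25% of the passband edge, at least 2 Hz, but never
    # more than the distance from the passband edge down to 0 Hz
    l_trans_demo = min(max(0.25 * freq_demo, 2.), freq_demo)
    # low-pass side: 25% of the passband edge, at least 2 Hz
    h_trans_demo = max(0.25 * freq_demo, 2.)
    print('%6.2f Hz: l_trans_bandwidth=%5.2f, h_trans_bandwidth=%5.2f'
          % (freq_demo, l_trans_demo, h_trans_demo))
###############################################################################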
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively, as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in Ifeachor & Jervis (2002) [2]_
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
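# As a back-of-the-envelope sketch of that rule (the length MNE actually
# reports may differ slightly, e.g. because it is rounded to an odd number
# of samples): a 1 Hz transition band at a 1000 Hz sample rate with a
# Hamming window gives roughly

trans_bandwidth_demo = 1.  # Hz, shortest transition bandwidth
approx_len = int(np.ceil(3.3 * 1000. / trans_bandwidth_demo))  # Hamming factor
print('Approximate filter length: %d samples (%0.1f s at 1000 Hz)'
      % (approx_len, approx_len / 1000.))
###############################################################################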
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
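# We can see the length disparity directly with :func:`mne.filter.create_filter`
# (a sketch; the parameter values here are arbitrary illustrative choices, not
# recommendations): with ``fir_design='firwin2'`` a 1-40 Hz band-pass inherits
# the long filter needed by the 1 Hz high-pass edge, whereas the 40 Hz low-pass
# alone is much shorter.

h_bp = mne.filter.create_filter(None, 1000., l_freq=1., h_freq=40.,
                                fir_design='firwin2', verbose=False)
h_lp = mne.filter.create_filter(None, 1000., l_freq=None, h_freq=40.,
                                fir_design='firwin2', verbose=False)
print('band-pass: %d taps, low-pass alone: %d taps' % (len(h_bp), len(h_lp)))
###############################################################################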
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut-filter-resample`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
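# As a minimal sketch of what such a transition looks like (assumed shape
# only; MNE-C's actual frequency-domain construction is more involved):

ramp = np.cos(np.linspace(0., np.pi / 2., 100)) ** 2  # gain goes 1 -> 0
print('Squared-cosine ramp starts at %0.2f and ends at %0.2f'
      % (ramp[0], ramp[-1]))
###############################################################################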
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M/EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in Widmann *et al.* (2015) [7]_. Briefly:
#
# * EEGLAB
# MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB
# (see the `EEGLAB filtering FAQ`_ for more information).
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. the
# `FieldTrip band-pass documentation <ftbp_>`_.
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* (2015) [7]_, there is a convenient list of
# important filter parameters that should be reported with each publication:
#
# 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. Cutoff frequency (including definition)
# 3. Filter order (or length)
# 4. Roll-off or transition bandwidth
# 5. Passband ripple and stopband attenuation
# 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. Direction of computation (one-pass forward/reverse, or two-pass forward
# and reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be ``l_freq``, ``h_freq``, ``method``, and if the method is
# FIR ``fir_window`` and ``fir_design``.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the ``h_trans_bandwidth`` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.``.
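# We can check this numerically (a sketch using the same ``create_filter``
# call as below): at 1000 Hz, ``h_freq=40`` gets a 10 Hz transition band, so
# the half-amplitude point should land near 40 + 10 / 2 = 45 Hz.

h_demo = mne.filter.create_filter(None, 1000., l_freq=None, h_freq=40.,
                                  method='fir', fir_window='hamming',
                                  fir_design='firwin', verbose=False)
w_demo, H_demo = signal.freqz(h_demo, worN=32768)
freqs_demo = w_demo * 1000. / (2 * np.pi)
print('Half-amplitude point: %0.1f Hz'
      % freqs_demo[np.argmin(np.abs(np.abs(H_demo) - 0.5))])
###############################################################################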
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(
data=None, # data is only used for sanity checking, not strictly needed
sfreq=1000., # sfreq of your data in Hz
l_freq=None,
h_freq=40., # assuming a lowpass of 40 Hz
method='fir',
fir_window='hamming',
fir_design='firwin',
verbose=True)
# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
# Instead, you can specify the roll-off with the ``iir_params``
# argument or stay with the default, which is a fourth order
# (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR filters in
# MNE), the passband ripple and stopband attenuation are dependent upon the
# window used in design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis (2002) [2]_, p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
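# These values can be checked empirically (a rough sketch -- the exact numbers
# depend a bit on filter length and on how the bands are defined): design a
# long low-pass with each window and look at the worst-case stop-band gain.

for window_demo in ('hann', 'hamming', 'blackman'):
    taps_demo = signal.firwin(2001, 0.2, window=window_demo)  # cutoff re. Nyquist
    w_demo, H_demo = signal.freqz(taps_demo, worN=32768)
    stop_gain = np.abs(H_demo)[w_demo / np.pi > 0.25]  # well past the transition
    print('%-8s stop-band attenuation: %5.1f dB'
          % (window_demo, -20 * np.log10(stop_gain.max())))
###############################################################################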
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
# is compensated for. If ``phase=='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
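# One easy property to verify (a sketch reusing ``create_filter``): with the
# default ``phase='zero'`` FIR design the taps are symmetric, i.e. the filter
# has linear phase, and MNE compensates for its delay when applying it.

h_zero = mne.filter.create_filter(None, 1000., l_freq=None, h_freq=40.,
                                  phase='zero', fir_design='firwin',
                                  verbose=False)
print('Taps symmetric (linear phase): %s' % np.allclose(h_zero, h_zero[::-1]))
###############################################################################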
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. https://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# https://doi.org/10.1016/j.jneumeth.2014.08.002
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. https://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artifacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
#        On high-pass filter artifacts (they’re real) and baseline correction
#        (it’s a good idea) in ERP/ERMF analysis.
#        Journal of Neuroscience Methods, 266, 166–170.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
#        High-pass filters and baseline correction in M/EEG analysis-continued
#        discussion. Journal of Neuroscience Methods, 266, 171–172.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter
| bsd-3-clause |
bilgili/nest-simulator | extras/ConnPlotter/examples/connplotter_tutorial.py | 13 | 26833 | # -*- coding: utf-8 -*-
#
# connplotter_tutorial.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#!========================
#! ConnPlotter: A Tutorial
#!========================
#!
#! :Author: Hans Ekkehard Plesser
#! :Institution: Norwegian University of Life Sciences, Simula
#! Research Laboratory, RIKEN Brain Sciences Institute
#! :Version: 0.7
#! :Date: 1 December 2009
#! :Copyright: Hans Ekkehard Plesser
#! :License: Creative Commons Attribution-Noncommercial-Share Alike License v 3.0
#!
#! :Note: For best results, you should run this script with PyReport by Gael Varoquaux,
#! available from http://gael-varoquaux.info/computers/pyreport/
#!
#! Please set using_pyreport to True if you want to run the
#! script through pyreport. Otherwise, figures will not be captured
#! correctly.
using_pyreport = False
#! Introduction
#!=============
#! This tutorial gives a brief introduction to the ConnPlotter
#! toolbox. It is by no means complete.
#! Avoid interactive backend when using pyreport
if using_pyreport:
import matplotlib
matplotlib.use("Agg")
#! Import pylab to call pylab.show() so that pyreport
#! can capture figures created. Must come before import
#! ConnPlotter so we get the correct show().
import pylab
#! If not using pyreport, disable pylab.show() until we reach end of script
if not using_pyreport:
pylab_show = pylab.show
def nop(s=None): pass
pylab.show = nop
#! Import ConnPlotter and its examples
import ConnPlotter as cpl
import ConnPlotter.examples as ex
#! Turn off warnings about resized figure windows
import warnings
warnings.simplefilter("ignore")
#! Define a helper function to show LaTeX tables on the fly
def showTextTable(connPattern, fileTrunk):
"""
Shows a Table of Connectivity as textual table.
Arguments:
connPattern ConnectionPattern instance
fileTrunk Eventual PNG image will be fileTrunk.png
"""
import subprocess as subp # to call LaTeX etc
import os # to remove files
# Write to LaTeX file so we get a nice textual representation
# We want a complete LaTeX document, so we set ``standalone``
# to ``True``.
connPattern.toLaTeX(file=fileTrunk+'.tex', standalone=True, enumerate=True)
# Create PDF, crop, and convert to PNG
try:
devnull = open('/dev/null', 'w')
subp.call(['pdflatex', fileTrunk], stdout=devnull, stderr=subp.STDOUT)
# need wrapper, since pdfcrop does not begin with #!
subp.call(['pdfcrop '+fileTrunk+'.pdf '+fileTrunk+'-crop.pdf'], shell=True,
stdout=devnull, stderr=subp.STDOUT)
devnull.close()
os.rename(fileTrunk+'-crop.pdf', fileTrunk+'.pdf')
for suffix in ('.tex', '-crop.pdf', '.png', '.aux', '.log'):
if os.path.exists(fileTrunk + suffix):
os.remove(fileTrunk + suffix)
except:
raise Exception('Could not create PDF Table.')
#! Simple network
#! ==============
#! This is a simple network with two layers A and B; layer B has two populations, E and I.
#! On the NEST side, we use only synapse type ``static_synapse``. ConnPlotter then infers
#! that synapses with positive weights should have type ``exc``, those with negative
#! weight type ``inh``. Those two types are known to ConnPlotter.
#! Obtain layer, connection and model list from the example set
s_layer, s_conn, s_model = ex.simple()
#! Create Connection Pattern representation
s_cp = cpl.ConnectionPattern(s_layer, s_conn)
#! Show pattern as textual table (we cheat a little and include PDF directly)
showTextTable(s_cp, 'simple_tt')
#$ \centerline{\includegraphics{simple_tt.pdf}}
#! Show pattern in full detail
#! ---------------------------
#! A separate patch is shown for each pair of populations.
#!
#! - Rows represent senders, columns targets.
#! - Layer names are given to the left/above, population names to the right and below.
#! - Excitatory synapses shown in blue, inhibitory in red.
#! - Each patch has its own color scale.
s_cp.plot()
pylab.show()
#! Let us take a look at what this connection pattern table shows:
#!
#! - The left column, with header "A", is empty: The "A" layer receives no input.
#! - The right column shows input to layer "B"
#!
#! * The top row, labeled "A", has two patches in the "B" column:
#!
#! + The left patch shows relatively focused input to the "E" population in layer "B" (first row of "Connectivity" table).
#! + The right patch shows wider input to the "I" population in layer "B" (second row of "Connectivity" table).
#!      + Patches are blue, indicating excitatory connections.
#!      + In both cases, masks are circular, and the product of connection weight and probability is independent of the distance between sender and target neuron.
#!
#! * The grey rectangle to the bottom right shows all connections from layer "B" populations to layer "B" populations. It is subdivided into two rows and two columns:
#!
#! + Left column: inputs to the "E" population.
#! + Right column: inputs to the "I" population.
#! + Top row: projections from the "E" population.
#! + Bottom row: projections from the "I" population.
#! + There is only one type of synapse for each sender-target pair, so there is only a single patch per pair.
#!      + Patches in the top row, from population "E" show excitatory connections, thus they are blue.
#!      + Patches in the bottom row, from population "I" show inhibitory connections, thus they are red.
#! + The patches in detail are:
#!
#! - **E to E** (top-left, row 3+4 in table): two rectangular projections at 90 degrees.
#! - **E to I** (top-right, row 5 in table): narrow gaussian projection.
#! - **I to E** (bottom-left, row 6 in table): wider gaussian projection.
#! - **I to I** (bottom-right, row 7 in table): circular projection covering entire layer.
#!
#! - **NB:** Color scales are different, so one **cannot** compare connection strengths between patches.
#! Full detail, common color scale
#! -------------------------------
s_cp.plot(globalColors=True)
pylab.show()
#! This figure shows the same data as the one above, but now all patches use a common color scale,
#! so full intensity color (either red or blue) indicates the strongest connectivity. From this we
#! see that
#!
#! - A to B/E is stronger than A to B/I
#! - B/E to B/I is the strongest of all connections at the center
#! - B/I to B/E is stronger than B/I to B/I
#! Aggregate by groups
#! -------------------
#! For each pair of population groups, sum connections of the same type across populations.
s_cp.plot(aggrGroups=True)
pylab.show()
#! In the figure above, all excitatory connections from B to B layer have been combined
#! into one patch, as have all inhibitory connections from B to B. In the upper-right
#! corner, all connections from layer A to layer B have been combined; the patch for
#! inhibitory connections is missing, as there are none.
#! Aggregate by groups and synapse models
#! --------------------------------------
s_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
#! When aggregating across synapse models, excitatory and inhibitory connections are combined. By
#! default, excitatory connections are weighted with +1, inhibitory connections
#! with -1 in the sum. This may yield kernels with positive and negative values.
#! They are shown on a red-white-blue scale as follows:
#!
#! - White always represents 0.
#! - Positive values are represented by increasingly saturated blue.
#! - Negative values are represented by increasingly saturated red.
#! - Colorscales are separate for red and blue:
#!
#! * largest positive value: fully saturated red
#! * largest negative value: fully saturated blue
#!
#! - Each patch has its own colorscales.
#! - When ``aggrSyns=True`` is combined with ``globalColors=True``, all patches use the same minimum and maximum in their red and blue color scales. The minimum is the negative of the maximum, so that blue and red intensities can be compared.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True)
pylab.show()
#! - We can explicitly set the limits of the color scale; if values exceeding the limits are present, this is indicated by an arrowhead at the end of the colorbar. User-defined color limits need not be symmetric about 0.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True, colorLimits=[-2,3])
pylab.show()
#! Save pattern to file
#! --------------------
#s_cp.plot(file='simple_example.png')
#! This saves the detailed diagram to the given file. If you want to save
#! the pattern in several file formats, you can pass a tuple of file names,
#! e.g., ``s_cp.plot(file=('a.eps', 'a.png'))``.
#!
#! **NB:** Saving directly to PDF may lead to files with artifacts. We
#! recommend to save to EPS and the convert to PDF.
#! Build network in NEST
#! ---------------------
import nest
import nest.topology as topo
#! Create models
for model in s_model:
nest.CopyModel(model[0], model[1], model[2])
#! Create layers, store layer info in Python variable
for layer in s_layer:
exec('%s = topo.CreateLayer(layer[1])' % layer[0])
#! Create connections, need to insert variable names
for conn in s_conn:
eval('topo.ConnectLayers(%s,%s,conn[2])' % (conn[0], conn[1]))
nest.Simulate(10)
#! **Oops:** Nothing happened? Well, it did, but pyreport cannot capture the
#! output directly generated by NEST. The absence of an error message in this
#! place shows that network construction and simulation went through.
#! Inspecting the connections actually created
#! :::::::::::::::::::::::::::::::::::::::::::
#! The following block of messy and makeshift code plots the targets of the
#! center neuron of the B/E population in the B/E and the B/I populations.
B_top = nest.GetStatus(B, 'topology')[0]
ctr_id = topo.GetElement(B, [int(B_top['rows']/2), int(B_top['columns']/2)])
# find excitatory element in B
E_id = [gid for gid in ctr_id
if nest.GetStatus([gid], 'model')[0] == 'E']
# get all targets, split into excitatory and inhibitory
alltgts = nest.GetStatus(nest.FindConnections(E_id, synapse_model='static_synapse'), 'target')
Etgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'E']
Itgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'I']
# obtain positions of targets
Etpos = tuple(zip(*topo.GetPosition(Etgts)))
Itpos = tuple(zip(*topo.GetPosition(Itgts)))
# plot excitatory
pylab.clf()
pylab.subplot(121)
pylab.scatter(Etpos[0], Etpos[1])
ctrpos = pylab.array(topo.GetPosition(E_id)[0])
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder = 99,
fc = 'r', alpha = 0.4, ec = 'none'))
ax.add_patch(pylab.Rectangle(ctrpos + pylab.array((-0.4,-0.2)), 0.8, 0.4, zorder = 1,
fc = 'none', ec = 'r', lw=3))
ax.add_patch(pylab.Rectangle(ctrpos + pylab.array((-0.2,-0.4)), 0.4, 0.8, zorder = 1,
fc = 'none', ec = 'r', lw=3))
ax.add_patch(pylab.Rectangle(ctrpos + pylab.array((-0.5,-0.5)), 1.0, 1.0, zorder = 1,
fc = 'none', ec = 'k', lw=3))
ax.set(aspect='equal', xlim=[-0.5,0.5], ylim=[-0.5,0.5],
xticks=[],yticks=[])
# plot inhibitory
pylab.subplot(122)
pylab.scatter(Itpos[0], Itpos[1])
ctrpos = topo.GetPosition(E_id)[0]
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder = 99,
fc = 'r', alpha = 0.4, ec = 'none'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.1, zorder = 2,
fc = 'none', ec = 'r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.2, zorder = 2,
fc = 'none', ec = 'r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.3, zorder = 2,
fc = 'none', ec = 'r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.5, zorder = 2,
fc = 'none', ec = 'r', lw=3))
ax.add_patch(pylab.Rectangle((-0.5,-0.5), 1.0, 1.0, zorder = 1,
fc = 'none', ec = 'k', lw=3))
ax.set(aspect='equal', xlim=[-0.5,0.5], ylim=[-0.5,0.5],
xticks=[],yticks=[])
pylab.show()
#! Thick red lines mark the mask; in the right panel, dashed red lines mark one, two and three standard deviations.
#! The sender location is marked by the red spot in the center. Layers are 40x40 in size.
#! A more complex network
#! ======================
#!
#! This network has layers A and B, with E and I populations in B. The added
#! complexity comes from the fact that we now have four synapse types: AMPA,
#! NMDA, GABA_A and GABA_B. These synapse types are known to ConnPlotter.
#! Setup and tabular display
c_layer, c_conn, c_model = ex.complex()
c_cp = cpl.ConnectionPattern(c_layer, c_conn)
showTextTable(c_cp, 'complex_tt')
#$ \centerline{\includegraphics{complex_tt.pdf}}
#! Pattern in full detail
#! ----------------------
c_cp.plot()
pylab.show()
#! Note the following differences to the simple pattern case:
#!
#! - For each pair of populations, e.g., B/E as sender and B/E as target, we now have two patches representing AMPA and NMDA synapse for the E population, GABA_A and _B for the I population.
#! - Colors are as follows:
#!
#! :AMPA: blue
#! :NMDA: green
#! :GABA_A: red
#! :GABA_B: magenta
#! - Note that the horizontal rectangular pattern (table line 3) describes AMPA synapses, while the vertical rectangular pattern (table line 4) describes NMDA synapses.
#! Full detail, common color scale
#! -------------------------------
c_cp.plot(globalColors=True)
pylab.show()
#! As above, but now with a common color scale.
#! **NB:** The patch for the B/I to B/I connection may look empty, but it actually shows a very light shade of red. Rules are as follows:
#!
#! - If there is no connection between two populations, show the grey layer background.
#! - All parts of the target layer that are outside the mask or strictly zero are off-white.
#! - If it looks bright white, it is a very diluted shade of the color for the pertaining synapse type.
#! Full detail, explicit color limits
#! ----------------------------------
c_cp.plot(colorLimits=[0,1])
pylab.show()
#! As above, but the common color scale is now given explicitly.
#! The arrow at the right end of the color scale indicates that the values
#! in the kernels extend beyond +1.
#! Aggregate by synapse models
#! -----------------------------
#! For each population pair, connections are summed across
#! synapse models.
#!
#! - Excitatory kernels are weighted with +1, inhibitory kernels with -1.
#! - The resulting kernels are shown on a color scale ranging from red (inhibitory) via white (zero) to blue (excitatory).
#! - Each patch has its own color scale
c_cp.plot(aggrSyns=True)
pylab.show()
#!
#! - AMPA and NMDA connections from B/E to B/E are now combined to form a cross.
#! - GABA_A and GABA_B connections from B/I to B/E are two concentric spots.
#! Aggregate by population group
#! ------------------------------
c_cp.plot(aggrGroups=True)
pylab.show()
#! This is in many ways orthogonal to aggregation by synapse model:
#! We keep synapse types separate, while we combine across populations. Thus,
#! we have added the horizontal bar (B/E to B/E, row 3) with the spot (B/E to B/I, row 5).
#! Aggregate by population group and synapse model
#! -----------------------------------------------------------------
c_cp.plot(aggrGroups=True,aggrSyns=True)
pylab.show()
#! All connection are combined for each pair of sender/target layer.
#! CPTs using the total charge deposited (TCD) as intensity
#! -----------------------------------------------------------
#! TCD-based CPTs are currently only available for the ht_neuron, since ConnPlotter
#! does not know how to obtain \int g(t) dt from NEST for other conductance-based
#! model neurons.
#! We need to create a separate ConnectionPattern instance for each membrane potential
#! we want to use in the TCD computation
c_cp_75 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-75.0)
c_cp_45 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-45.0)
#! In order to obtain a meaningful comparison between both membrane potentials,
#! we use the same global color scale
#! V_m = -75 mV
#! ::::::::::::::
c_cp_75.plot(colorLimits=[0,150])
pylab.show()
#! V_m = -45 mV
#! ::::::::::::::
c_cp_45.plot(colorLimits=[0,150])
pylab.show()
#! Note that the NMDA projection virtually vanishes for V_m=-75mV, but is very strong for V_m=-45mV.
#! GABA_A and GABA_B projections are also stronger, while AMPA is weaker for V_m=-45mV.
#! Non-Dale network model
#! ======================
#! By default, ConnPlotter assumes that networks follow Dale's law, i.e.,
#! either make excitatory or inhibitory connections. If this assumption
#! is violated, we need to inform ConnPlotter how synapse types are grouped.
#! We look at a simple example here.
#! Load model
nd_layer, nd_conn, nd_model = ex.non_dale()
#! We specify the synapse configuration using the synTypes argument:
#!
#! - synTypes is a tuple.
#! - Each element in the tuple represents a group of synapse models
#! - Any sender can make connections with synapses from **one group only**.
#! - Each synapse model is specified by a ``SynType``.
#! - The SynType constructor takes three arguments:
#!
#! * The synapse model name
#!   * The weight to apply when aggregating across synapse models
#! * The color to use for the synapse type
#!
#! - Synapse names must be unique, and must form a superset of all synapse models in the network.
nd_cp = cpl.ConnectionPattern(nd_layer, nd_conn, synTypes=(
(cpl.SynType('exc', 1.0, 'b'), cpl.SynType('inh', -1.0, 'r')),))
showTextTable(nd_cp, 'non_dale_tt')
#$ \centerline{\includegraphics{non_dale_tt.pdf}}
nd_cp.plot()
pylab.show()
#! Note that we now have red and blue patches side by side, as the same
#! population can make excitatory and inhibitory connections.
#! Configuring the ConnectionPattern display
#! =========================================
#! I will now show you a few ways in which you can configure how ConnPlotter shows
#! connection patterns.
#! User defined synapse types
#! --------------------------
#!
#! By default, ConnPlotter knows the following two sets of synapse types.
#!
#! exc/inh
#! - Used automatically when all connections have the same ``synapse_model``.
#! - Connections with positive weight are assigned model exc, those with negative weight model inh.
#! - When computing totals, exc has weight +1, inh weight -1
#! - Exc is colored blue, inh red.
#!
#! AMPA/NMDA/GABA_A/GABA_B
#! - Used if the set of ``synapse_model`` s in the network is a subset of those four types.
#! - AMPA/NMDA carry weight +1, GABA_A/GABA_B weight -1.
#! - Colors are as follows:
#!
#! :AMPA: blue
#! :NMDA: green
#! :GABA_A: red
#! :GABA_B: magenta
#!
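#! For illustration, writing those built-in defaults out explicitly via the
#! ``synTypes`` argument would look roughly as follows (a sketch -- the actual
#! defaults live inside ConnPlotter, so passing this should be equivalent to
#! passing nothing):
dflt_syns = ( (cpl.SynType('AMPA', 1, 'b'), cpl.SynType('NMDA', 1, 'g')),
              (cpl.SynType('GABA_A', -1, 'r'), cpl.SynType('GABA_B', -1, 'm')) )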
#!
#! We saw a first example of user-defined synapse types in the non-Dale example above.
#! In that case, we only changed the grouping. Here, I will demonstrate the effect of different
#! ordering, weighting, and color specifications. We use the complex model from above as example.
#!
#! *NOTE*: It is most likely a *bad idea* to change the colors or placement of synapse types. If
#! everyone uses the same design rules, we will all be able to read each other's figures much
#! more easily.
#! Placement of synapse types
#! ::::::::::::::::::::::::::
#!
#! The ``synTypes`` nested tuple defines the placement of patches for different synapse
#! models. Default layout is
#!
#! ====== ======
#! AMPA NMDA
#! GABA_A GABA_B
#! ====== ======
#!
#! All four matrix elements are shown in this layout only when using ``mode='layer'`` display.
#! Otherwise, one or the other row is shown. Note that synapses that can arise from a layer
#! simultaneously, must always be placed on one matrix row, i.e., in one group. As an example,
#! we now invert placement, without any other changes:
cinv_syns = ( (cpl.SynType('GABA_B', -1, 'm'), cpl.SynType('GABA_A', -1, 'r')),
(cpl.SynType('NMDA' , 1, 'g'), cpl.SynType('AMPA' , 1, 'b')) )
cinv_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cinv_syns)
cinv_cp.plot()
pylab.show()
#! Notice that on each row the synapses are exchanged compared to the original figure above.
#! When displaying by layer, also the rows have traded place:
cinv_cp.plot(aggrGroups=True)
pylab.show()
#! Totals are not affected:
cinv_cp.plot(aggrGroups=True,aggrSyns=True)
pylab.show()
#! Weighting of synapse types in ``totals`` mode
#! :::::::::::::::::::::::::::::::::::::::::::::
#!
#! Different synapses may have quite different efficacies, so weighting them all with +-1
#! when computing totals may give a wrong impression. Different weights can be supplied
#! as second argument to SynTypes(). We return to the normal placement of synapses and
#! create two examples with very different weights:
cw1_syns = ( (cpl.SynType('AMPA' , 10, 'b'), cpl.SynType('NMDA' , 1, 'g')),
(cpl.SynType('GABA_A', -2, 'g'), cpl.SynType('GABA_B', -10, 'b')) )
cw1_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw1_syns)
cw2_syns = ( (cpl.SynType('AMPA' , 1, 'b'), cpl.SynType('NMDA' , 10, 'g')),
(cpl.SynType('GABA_A', -20, 'g'), cpl.SynType('GABA_B', -1, 'b')) )
cw2_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw2_syns)
#! We first plot them both in population mode
cw1_cp.plot(aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrSyns=True)
pylab.show()
#! Finally, we plot them aggregating across groups and synapse models
cw1_cp.plot(aggrGroups=True,aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrGroups=True,aggrSyns=True)
pylab.show()
#! Alternative colors for synapse patches
#! ::::::::::::::::::::::::::::::::::::::
#! Different colors can be specified using any legal color specification. Colors should
#! be saturated, as they will be mixed with white. You may also provide a colormap explicitly.
#! For this example, we use once more normal placement and weights. As all synapse types are shown
#! in layer mode, we use that mode for display here.
cc_syns = ( (cpl.SynType('AMPA' , 1, 'maroon'), cpl.SynType('NMDA' , 1, (0.9,0.5,0) )),
(cpl.SynType('GABA_A', -1, '0.7' ), cpl.SynType('GABA_B', 1, pylab.cm.hsv)) )
cc_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cc_syns)
cc_cp.plot(aggrGroups=True)
pylab.show()
#! We get the following colors:
#!
#! AMPA brownish
#! NMDA golden orange
#! GABA_A grey
#! GABA_B hsv colormap
#!
#! **NB:** When passing an explicit colormap, parts outside the mask will be shown in the
#! "bad" color of the colormap, usually the "bottom" color in the map. To let points outside
#! the mask appear in white, set the bad color of the colormap; unfortunately, this modifies
#! the colormap.
pylab.cm.hsv.set_bad(cpl.colormaps.bad_color)
ccb_syns = ( (cpl.SynType('AMPA' , 1, 'maroon'), cpl.SynType('NMDA' , 1, (0.9, 0.5, 0.1))),
(cpl.SynType('GABA_A', -1, '0.7' ), cpl.SynType('GABA_B', 1, pylab.cm.hsv)) )
ccb_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=ccb_syns)
ccb_cp.plot(aggrGroups=True)
pylab.show()
#! Other configuration options
#! ---------------------------
#!
#! Some more adjustments are possible by setting certain module properties.
#! Some of these need to be set before ConnectionPattern() is constructed.
#!
#! Background color for masked parts of each patch
cpl.colormaps.bad_color = 'cyan'
#! Background for layers
cpl.plotParams.layer_bg = (0.8, 0.8, 0.0)
#! Resolution for patch computation
cpl.plotParams.n_kern = 5
#! Physical size of patches: longest edge of largest patch, in mm
cpl.plotParams.patch_size = 40
#! Margins around the figure (excluding labels)
cpl.plotParams.margins.left = 40
cpl.plotParams.margins.top = 30
cpl.plotParams.margins.bottom = 15
cpl.plotParams.margins.right = 30
#! Fonts for layer and population labels
import matplotlib.font_manager as fmgr
cpl.plotParams.layer_font = fmgr.FontProperties(family='serif', weight='bold', size='xx-large')
cpl.plotParams.pop_font = fmgr.FontProperties('small')
#! Orientation for layer and population label
cpl.plotParams.layer_orientation = {'sender': 'vertical', 'target': 60}
cpl.plotParams.pop_orientation = {'sender': 'horizontal', 'target': -45}
#! Font for legend titles and ticks, tick placement, and tick format
cpl.plotParams.legend_title_font = fmgr.FontProperties(family='serif', weight='bold', size='large')
cpl.plotParams.legend_tick_font = fmgr.FontProperties(family='sans-serif', weight='light', size='xx-small')
cpl.plotParams.legend_ticks = [0,1,2]
cpl.plotParams.legend_tick_format = '%.1f pA'
cx_cp = cpl.ConnectionPattern(c_layer, c_conn)
cx_cp.plot(colorLimits=[0,2])
pylab.show()
#! Several more options are available to control the format of the color bars (they all are members of plotParams):
#! * legend_location : if 'top', place synapse name atop color bar
#! * cbwidth : width of single color bar relative to figure width
#! * margins.colbar : height of lower margin set aside for color bar, in mm
#! * cbheight : height of single color bar relative to margins.colbar
#! * cbspace : spacing between color bars, relative to figure width
#! * cboffset : offset of first color bar from left margin, relative to figure width
#! You can also specify the width of the final figure, but this may not work
#! well with on-screen display or here in pyreport. Width is in mm.
#! Note that left and right margin combined are 70mm wide, so only 50mm are left
#! for the actual CPT.
cx_cp.plot(fixedWidth=120)
pylab.show()
#! If not using pyreport, we finally show and block
if not using_pyreport:
print("")
print("The connplotter_tutorial script is done. Call pylab.show() and enjoy the figures!")
print("You may need to close all figures manually to get the Python prompt back.")
print("")
pylab.show = pylab_show
| gpl-2.0 |
xubenben/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
dialounke/pylayers | pylayers/location/geometric/util/cdf2.py | 2 | 3799 | # -*- coding:Utf-8 -*-
import numpy as np
import scipy as sp
import os
import matplotlib.pyplot as plt
import pdb
try:
import mplrc.ieee.transaction
except:
pass
from matplotlib import rcParams
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
class CDF(object):
def __init__(self, ld, filename='cdf'):
"""
cdf = CDF(ld)
ld is a list of dictionnary
d0 = ld[0]
d0['bound'] : bornes en abscisses de la cdf 0
d0['values'] : valeurs
d0['xlabel'] :
d0['ylabel'] :
d0['legend'] : legend
d0['title] : title
d0['filename] : filename
d0['linewidth'] : linewidth
"""
self.ld = ld
self.parmsh = {}
self.parmsh['file'] = True
self.filename = filename
self.cdf = []
for d in self.ld:
            try:
                self.save = d['save']
            except KeyError:
                self.save = True
bound = d['bound']
values = d['values']
Nv = len(values)
cdf = np.array([])
for k in bound:
u = np.nonzero(values <= k)
lu = len(u[0]) / (Nv * 1.0)
cdf = np.hstack((cdf, lu))
self.cdf.append(cdf)
def show(self):
"""
show()
"""
f = plt.figure()
leg = []
c = []
ax = f.add_subplot(111)
for k in range(len(self.ld)):
d = self.ld[k]
bound = d['bound']
marker = d['marker']
markersize = d['markersize']
markercolor = d['markercolor']
markerfrequency = d['markerfrequency']
linewidth = d['linewidth']
line = d['line']
color = d['color']
legend = d['legend']
cdf = self.cdf[k]
c.append(
ax.plot(bound, cdf,
marker=marker,
markevery=markerfrequency,
ms=markersize,
mfc=markercolor,
ls=line, c=color,
linewidth=linewidth,
label=legend))
plt.xlabel(self.ld[0]['xlabel'])
plt.ylabel(self.ld[0]['ylabel'])
ax.legend(loc='best', scatterpoints=1, numpoints=1.)
plt.grid()
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
        if self.save:
            # create the output directory if it does not exist yet
            if not os.path.isdir('./cdf/' + self.filename):
                os.makedirs('./cdf/' + self.filename)
plt.savefig('./cdf/' + self.filename + '/' + self.filename + '.pdf',
format='pdf', bbox_inches='tight', pad_inches=0)
plt.savefig('./cdf/' + self.filename + '/' + self.filename + '.eps',
format='eps', bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
d0 = {}
d0['values'] = sp.randn(1000)
d0['bound'] = np.arange(-10, 10, 0.1)
d0['xlabel'] = 'xlabel'
d0['ylabel'] = 'ylabel'
d0['legend'] = 'legend '
d0['title'] = 'title'
d0['marker'] = '*'
d0['line'] = '-'
d0['color'] = 'red'
d0['markersize'] = 10
d0['markercolor']='b'
d0['markerfrequency']=4
d0['linewidth'] = 3
d0['filename'] = 'essai.png'
d1 = {}
d1['values'] = 4 * sp.randn(1000)
d1['bound'] = np.arange(-10, 10, 0.1)
d1['xlabel'] = 'xlabel'
d1['ylabel'] = 'ylabel'
d1['legend'] = 'legend '
d1['title'] = 'title'
d1['marker'] = 'o'
d1['color'] = 'red'
d1['markersize'] = 10
d1['markercolor']='b'
d1['markerfrequency']=4
d1['line'] = '-'
d1['linewidth'] = 3
lv = [d0, d1]
c = CDF(lv,'filename')
| mit |
yanlend/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 25520 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
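# (Aside, not in the original test: euclidean_distances relies on the expansion
# ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, which is why the precomputed squared
# row norms below can be passed in and reused instead of being recomputed.)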
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
hobson/pug-invest | pug/invest/bin/thresh-test.py | 1 | 2097 | from pug.invest.util import clipped_area
# from scipy.optimize import minimize
import pandas as pd
from matplotlib import pyplot as plt
np = pd.np
t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
thresh=234
capacity=562.5 # barely enough to do anything
clipped_area(ts, thresh=thresh)
pd.DataFrame({'ts': ts, 'thresh': pd.Series(thresh*np.ones(len(ts)), index=ts.index)}).plot()
# invest.util.clipping_params()
ts.index = ts.index.astype(np.int64)
costs = []
def cost_fun(x, *args):
thresh = x[0]
ts, capacity, bounds, costs = args
integral = clipped_area(ts, thresh=thresh)
cost = (integral - capacity) ** 2
cost /= ((thresh - bounds[0] + sum(bounds))**2)**0.5
cost /= ((thresh - bounds[1] + sum(bounds))**2)**0.5
costs += [(thresh, cost, integral)]
return cost
bounds = (ts.min(), ts.max())
# thresh0 = 0.9*bounds[1] + 0.1*bounds[0]
# optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds, costs))
# thresh = optimum.x[0]
# integral = clipped_area(ts, thresh=thresh)
rows = []
threshes = np.arange(bounds[0]*.9, 1.1*bounds[1], (1.1*bounds[1]-.9*bounds[0])/1000.)
for thresh in threshes:
integral = clipped_area(ts, thresh=thresh)
terms = np.array([(100. * (integral - capacity) / capacity) ** 2,
1. / (((thresh - bounds[0]) / max(bounds))**2)**0.5,
1. / (((thresh - bounds[1]) / max(bounds))**2)**0.5,
1.1 ** (integral / capacity)])
row = [thresh, integral] + list(terms) + [np.sum(terms)]
rows += [row]
labels = ['threshold', 'integral', 'capacity-term', 'lower-bound-term', 'upper-bound-term', 'exponential-capacity-term', 'total-cost']
df = pd.DataFrame(rows, columns=labels)
df2 = pd.DataFrame(df[[c for c in df.columns if c[-1] == 'm' or c[-1]=='t']])
df2.index = df['threshold']
df2.plot(logy=True)
plt.show()
| mit |
xiaozhuchacha/OpenBottle | grammar_induction/earley_parser/nltk/tbl/demo.py | 7 | 14715 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, absolute_import, division
import os
import pickle
import random
import time
from nltk.corpus import treebank
from nltk.tbl import error_list, Template
from nltk.tag.brill import Word, Pos
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
def demo():
"""
Run a demo with defaults. See source comments for details,
or docstrings of any of the more specific demo_* functions.
"""
postag()
def demo_repr_rule_format():
"""
Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="repr")
def demo_str_rule_format():
"""
Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="str")
def demo_verbose_rule_format():
"""
Exemplify Rule.format("verbose")
"""
postag(ruleformat="verbose")
def demo_multiposition_feature():
"""
The feature/s of a template takes a list of positions
relative to the current word where the feature should be
looked for, conceptually joined by logical OR. For instance,
Pos([-1, 1]), given a value V, will hold whenever V is found
one step to the left and/or one step to the right.
For contiguous ranges, a 2-arg form giving inclusive end
points can also be used: Pos(-3, -1) is the same as the arg
below.
"""
postag(templates=[Template(Pos([-3,-2,-1]))])
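# A minimal sketch of the equivalence described in the docstring above (added
# as an illustration; not part of the original demo set):
#
#     Template(Pos([-3, -2, -1]))   # explicit list of positions
#     Template(Pos(-3, -1))         # the same template, via the inclusive 2-arg range form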
def demo_multifeature_template():
"""
Templates can have more than a single feature.
"""
postag(templates=[Template(Word([0]), Pos([-2,-1]))])
def demo_template_statistics():
"""
Show aggregate statistics per template. Little used templates are
candidates for deletion, much used templates may possibly be refined.
Deleting unused templates is mostly about saving time and/or space:
training is basically O(T) in the number of templates T
(also in terms of memory usage, which often will be the limiting factor).
"""
postag(incremental_stats=True, template_stats=True)
def demo_generated_templates():
"""
Template.expand and Feature.expand are class methods facilitating
generating large amounts of templates. See their documentation for
details.
Note: training with 500 templates can easily fill all available memory,
even on relatively small corpora
"""
wordtpls = Word.expand([-1,0,1], [1,2], excludezero=False)
tagtpls = Pos.expand([-2,-1,0,1], [1,2], excludezero=True)
templates = list(Template.expand([wordtpls, tagtpls], combinations=(1,3)))
print("Generated {0} templates for transformation-based learning".format(len(templates)))
postag(templates=templates, incremental_stats=True, template_stats=True)
def demo_learning_curve():
"""
Plot a learning curve -- the contribution on tagging accuracy of
the individual rules.
Note: requires matplotlib
"""
postag(incremental_stats=True, separate_baseline_data=True, learning_curve_output="learningcurve.png")
def demo_error_analysis():
"""
Writes a file with context for each erroneous word after tagging testing data
"""
postag(error_output="errors.txt")
def demo_serialize_tagger():
"""
Serializes the learned tagger to a file in pickle format; reloads it
and validates the process.
"""
postag(serialize_output="tagger.pcl")
def demo_high_accuracy_rules():
"""
Discard rules with low accuracy. This may hurt performance a bit,
but will often produce rules which are more interesting read to a human.
"""
postag(num_sents=3000, min_acc=0.96, min_score=10)
def postag(
templates=None,
tagged_data=None,
num_sents=1000,
max_rules=300,
min_score=3,
min_acc=None,
train=0.8,
trace=3,
randomize=False,
ruleformat="str",
incremental_stats=False,
template_stats=False,
error_output=None,
serialize_output=None,
learning_curve_output=None,
learning_curve_take=300,
baseline_backoff_tagger=None,
separate_baseline_data=False,
cache_baseline_tagger=None):
"""
Brill Tagger Demonstration
:param templates: templates to be used in training
:type templates: list of Template
:param tagged_data: the corpus of tagged sentences to draw training and testing data from
:type tagged_data: C{list} of C{list} of tuples
:param num_sents: how many sentences of training and testing data to use
:type num_sents: C{int}
:param max_rules: maximum number of rule instances to create
:type max_rules: C{int}
:param min_score: the minimum score for a rule in order for it to be considered
:type min_score: C{int}
:param min_acc: the minimum accuracy for a rule in order for it to be considered
:type min_acc: C{float}
:param train: the fraction of the corpus to be used for training (1=all)
:type train: C{float}
:param trace: the level of diagnostic tracing output to produce (0-4)
:type trace: C{int}
:param randomize: whether the training data should be a random subset of the corpus
:type randomize: C{bool}
:param ruleformat: rule output format, one of "str", "repr", "verbose"
:type ruleformat: C{str}
:param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
:type incremental_stats: C{bool}
:param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
:type template_stats: C{bool}
:param error_output: the file where errors will be saved
:type error_output: C{string}
:param serialize_output: the file where the learned tbl tagger will be saved
:type serialize_output: C{string}
:param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
:type learning_curve_output: C{string}
:param learning_curve_take: how many rules plotted
:type learning_curve_take: C{int}
:param baseline_backoff_tagger: the backoff tagger used by the baseline (unigram) tagger
:type baseline_backoff_tagger: tagger
:param separate_baseline_data: use a fraction of the training data exclusively for training baseline
:type separate_baseline_data: C{bool}
:param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
deterministic output from the baseline unigram tagger between python versions)
:type cache_baseline_tagger: C{string}
Note on separate_baseline_data: if False, the training data is reused both for the baseline and the rule learner. This
is fast and fine for a demo, but is likely to generalize worse on unseen data.
It also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).
"""
# defaults
baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
if templates is None:
from nltk.tag.brill import describe_template_sets, brill24
# some pre-built template sets taken from typical systems or publications are
# available. Print a list with describe_template_sets()
# for instance:
templates = brill24()
(training_data, baseline_data, gold_data, testing_data) = \
_demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data)
# creating (or reloading from cache) a baseline tagger (unigram tagger)
# this is just a mechanism for getting deterministic output from the baseline between
# python versions
if cache_baseline_tagger:
if not os.path.exists(cache_baseline_tagger):
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
with open(cache_baseline_tagger, 'w') as print_rules:
pickle.dump(baseline_tagger, print_rules)
print("Trained baseline tagger, pickled it to {0}".format(cache_baseline_tagger))
with open(cache_baseline_tagger, "r") as print_rules:
baseline_tagger= pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(cache_baseline_tagger))
else:
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
print("Trained baseline tagger")
if gold_data:
print(" Accuracy on test set: {0:0.4f}".format(baseline_tagger.evaluate(gold_data)))
# creating a Brill tagger
tbrill = time.time()
trainer = BrillTaggerTrainer(baseline_tagger, templates, trace, ruleformat=ruleformat)
print("Training tbl tagger...")
brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
print("Trained tbl tagger in {0:0.2f} seconds".format(time.time() - tbrill))
if gold_data:
print(" Accuracy on test set: %.4f" % brill_tagger.evaluate(gold_data))
# printing the learned rules, if learned silently
if trace == 1:
print("\nLearned rules: ")
for (ruleno, rule) in enumerate(brill_tagger.rules(),1):
print("{0:4d} {1:s}".format(ruleno, rule.format(ruleformat)))
# printing template statistics (optionally including comparison with the training data)
# note: if not separate_baseline_data, then baseline accuracy will be artificially high
if incremental_stats:
print("Incrementally tagging the test data, collecting individual rule statistics")
(taggedtest, teststats) = brill_tagger.batch_tag_incremental(testing_data, gold_data)
print(" Rule statistics collected")
if not separate_baseline_data:
print("WARNING: train_stats asked for separate_baseline_data=True; the baseline "
"will be artificially high")
trainstats = brill_tagger.train_stats()
if template_stats:
brill_tagger.print_template_statistics(teststats)
if learning_curve_output:
_demo_plot(learning_curve_output, teststats, trainstats, take=learning_curve_take)
print("Wrote plot of learning curve to {0}".format(learning_curve_output))
else:
print("Tagging the test data")
taggedtest = brill_tagger.tag_sents(testing_data)
if template_stats:
brill_tagger.print_template_statistics()
# writing error analysis to file
if error_output is not None:
with open(error_output, 'w') as f:
f.write('Errors for Brill Tagger %r\n\n' % serialize_output)
f.write(u'\n'.join(error_list(gold_data, taggedtest)).encode('utf-8') + '\n')
print("Wrote tagger errors including context to {0}".format(error_output))
# serializing the tagger to a pickle file and reloading (just to see it works)
if serialize_output is not None:
taggedtest = brill_tagger.tag_sents(testing_data)
with open(serialize_output, 'w') as print_rules:
pickle.dump(brill_tagger, print_rules)
print("Wrote pickled tagger to {0}".format(serialize_output))
with open(serialize_output, "r") as print_rules:
brill_tagger_reloaded = pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(serialize_output))
taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical")
else:
print("PROBLEM: Reloaded tagger gave different results on test set")
def _demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data):
# train is the proportion of data used in training; the rest is reserved
# for testing.
if tagged_data is None:
print("Loading tagged data from treebank... ")
tagged_data = treebank.tagged_sents()
if num_sents is None or len(tagged_data) <= num_sents:
num_sents = len(tagged_data)
if randomize:
random.seed(len(tagged_data))
random.shuffle(tagged_data)
cutoff = int(num_sents * train)
training_data = tagged_data[:cutoff]
gold_data = tagged_data[cutoff:num_sents]
testing_data = [[t[0] for t in sent] for sent in gold_data]
if not separate_baseline_data:
baseline_data = training_data
else:
bl_cutoff = len(training_data) // 3
(baseline_data, training_data) = (training_data[:bl_cutoff], training_data[bl_cutoff:])
(trainseqs, traintokens) = corpus_size(training_data)
(testseqs, testtokens) = corpus_size(testing_data)
(bltrainseqs, bltraintokens) = corpus_size(baseline_data)
print("Read testing data ({0:d} sents/{1:d} wds)".format(testseqs, testtokens))
print("Read training data ({0:d} sents/{1:d} wds)".format(trainseqs, traintokens))
print("Read baseline data ({0:d} sents/{1:d} wds) {2:s}".format(
bltrainseqs, bltraintokens, "" if separate_baseline_data else "[reused the training set]"))
return (training_data, baseline_data, gold_data, testing_data)
def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
testcurve = [teststats['initialerrors']]
for rulescore in teststats['rulescores']:
testcurve.append(testcurve[-1] - rulescore)
testcurve = [1 - x/teststats['tokencount'] for x in testcurve[:take]]
traincurve = [trainstats['initialerrors']]
for rulescore in trainstats['rulescores']:
traincurve.append(traincurve[-1] - rulescore)
traincurve = [1 - x/trainstats['tokencount'] for x in traincurve[:take]]
import matplotlib.pyplot as plt
r = list(range(len(testcurve)))
plt.plot(r, testcurve, r, traincurve)
plt.axis([None, None, None, 1.0])
plt.savefig(learning_curve_output)
NN_CD_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
(r'.*', 'NN')])
REGEXP_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
def corpus_size(seqs):
return (len(seqs), sum(len(x) for x in seqs))
if __name__ == '__main__':
demo_learning_curve()
| mit |
bthirion/scikit-learn | examples/classification/plot_classifier_comparison.py | 26 | 5236 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
bnaul/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 15 | 5402 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
# This is to avoid division by zero while doing np.log10
EPSILON = 1e-4
X, y = datasets.load_diabetes(return_X_y=True)
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
# #############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
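# For orientation (an explanatory aside, not part of the original example): the
# information criteria trade goodness of fit against model size,
#     AIC ~ -2 * log-likelihood + 2 * df
#     BIC ~ -2 * log-likelihood + log(n) * df
# where df is the number of degrees of freedom of the fitted Lasso model; the
# exact expression used by LassoLarsIC is documented with that estimator.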
def plot_ic_criterion(model, name, color):
criterion_ = model.criterion_
plt.semilogx(model.alphas_ + EPSILON, criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(model.alpha_ + EPSILON, color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel(r'$\alpha$')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
# #############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
plt.figure()
ymin, ymax = 2300, 3800
plt.semilogx(model.alphas_ + EPSILON, model.mse_path_, ':')
plt.plot(model.alphas_ + EPSILON, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(model.alpha_ + EPSILON, linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel(r'$\alpha$')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
# #############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
plt.figure()
plt.semilogx(model.cv_alphas_ + EPSILON, model.mse_path_, ':')
plt.semilogx(model.cv_alphas_ + EPSILON, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(model.alpha_, linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel(r'$\alpha$')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
shivamvats/graphSearch | main_mha.py | 1 | 2271 | from heuristicSearch.planners.mhastar import MHAstar
from heuristicSearch.envs.env import GridEnvironment
from heuristicSearch.envs.occupancy_grid import OccupancyGrid
from heuristicSearch.graph.node import Node
from heuristicSearch.utils.visualizer import ImageVisualizer
from heuristicSearch.utils.utils import *
from functools import partial
import matplotlib.pyplot as plt
import cv2 as cv
import pickle
import sys
def main():
"""Numpy array is accessed as (r, c) while a point is (x, y). The code
follows (r, c) convention everywhere. Hence, be careful whenever using a
point with opencv."""
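# Illustrative note (not from the original source): the occupancy grid cell at
# row r, column c is occMap[r, c], but the same location handed to an OpenCV
# drawing call must be passed as the point (x, y) = (c, r).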
folder = sys.argv[1]
image = folder + "/image.png"
start_goal = folder + "/start_goal.pkl"
startPoint, goalPoint = pickle.load( open(start_goal, "rb") )
occGrid = OccupancyGrid()
occMap = occGrid.getMapFromImage(image)
viz = ImageVisualizer(occMap)
viz.incrementalDisplay = True
print(occMap.shape)
print(startPoint, goalPoint)
gridEnv = GridEnvironment(occMap, occMap.shape[0], occMap.shape[1])
# List of two heuristics.
heuristics = (gridEnv.euclideanHeuristic, gridEnv.diagonalHeuristic)
gridEnv.setHeuristic(heuristics)
startNode = Node(gridEnv.getIdFromPoint(startPoint))
startNode.setParent(None)
goalNode = Node(gridEnv.getIdFromPoint(goalPoint))
gridEnv.addNode(goalNode)
gridEnv.goal(goalNode)
assert(gridEnv.isValidPoint(startPoint))
assert(gridEnv.isValidPoint(goalPoint))
# Planner
planner = MHAstar( gridEnv, w1=5, w2=5 )
planFound = planner.plan(startNode, goalNode, viz=viz)
path = []
if planFound:
print("Planning successful")
currNode = goalNode
while(currNode != startNode):
path.append(currNode)
currNode = currNode.getParent()
# Reverse the list.
path = path[::-1]
planStateIds = map(lambda node : node.getNodeId(), path)
pathPoints = []
for node in path:
pathPoints.append(gridEnv.getPointFromId(node.getNodeId()))
viz.displayImage()
#viz.joinPointsInOrder(pathPoints, thickness=5)
viz.markPoints( pathPoints, color=100 )
viz.displayImage()
cv.waitKey(0)
#cv.imwrite( )
main()
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/path.py | 69 | 20263 | """
Contains a class for managing paths (polylines).
"""
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib._path import point_in_path, get_path_extents, \
point_in_path_collection, get_path_collection_extents, \
path_in_path, path_intersects_path, convert_path_to_polygons
from matplotlib.cbook import simple_linear_interpolation
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
to get the vertex/code pairs. This is important, since many
:class:`Path` objects, as an optimization, do not store a *codes*
at all, but have a default one provided for them by
:meth:`iter_segments`.
Note also that the vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 5 # 1 vertex
NUM_VERTICES = [1, 1, 1, 2, 3, 1]
code_type = np.uint8
def __init__(self, vertices, codes=None):
"""
Create a new path with the given vertices and codes.
*vertices* is an Nx2 numpy float array, masked array or Python
sequence.
*codes* is an N-length numpy array or Python sequence of type
:attr:`matplotlib.path.Path.code_type`.
These two arrays must have the same length in the first
dimension.
If *codes* is None, *vertices* will be treated as a series of
line segments.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if codes is not None:
codes = np.asarray(codes, self.code_type)
assert codes.ndim == 1
assert len(codes) == len(vertices)
assert vertices.ndim == 2
assert vertices.shape[1] == 2
self.should_simplify = (len(vertices) >= 128 and
(codes is None or np.all(codes <= Path.LINETO)))
self.has_nonfinite = not np.isfinite(vertices).all()
self.codes = codes
self.vertices = vertices
#@staticmethod
def make_compound_path(*args):
"""
(staticmethod) Make a compound path from a list of Path
objects. Only polygons (not curves) are supported.
"""
for p in args:
assert p.codes is None
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = Path.LINETO * np.ones(total_length)
i = 0
for length in lengths:
codes[i] = Path.MOVETO
i += length
return Path(vertices, codes)
make_compound_path = staticmethod(make_compound_path)
def __repr__(self):
return "Path(%s, %s)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, simplify=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
If *simplify* is provided, it must be a tuple (*width*,
*height*) defining the size of the figure, in native units
(e.g. pixels or points). Simplification implies both removing
adjacent line segments that are very close to parallel, and
removing line segments outside of the figure. The path will
be simplified *only* if :attr:`should_simplify` is True, which
is determined in the constructor by this criteria:
- No curves
- More than 128 vertices
"""
vertices = self.vertices
if not len(vertices):
return
codes = self.codes
len_vertices = len(vertices)
isfinite = np.isfinite
NUM_VERTICES = self.NUM_VERTICES
MOVETO = self.MOVETO
LINETO = self.LINETO
CLOSEPOLY = self.CLOSEPOLY
STOP = self.STOP
if simplify is not None and self.should_simplify:
polygons = self.to_polygons(None, *simplify)
for vertices in polygons:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
elif codes is None:
if self.has_nonfinite:
next_code = MOVETO
for v in vertices:
if np.isfinite(v).all():
yield v, next_code
next_code = LINETO
else:
next_code = MOVETO
else:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
else:
i = 0
was_nan = False
while i < len_vertices:
code = codes[i]
if code == CLOSEPOLY:
yield [], code
i += 1
elif code == STOP:
return
else:
num_vertices = NUM_VERTICES[int(code)]
curr_vertices = vertices[i:i+num_vertices].flatten()
if not isfinite(curr_vertices).all():
was_nan = True
elif was_nan:
yield curr_vertices[-2:], MOVETO
was_nan = False
else:
yield curr_vertices, code
i += num_vertices
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`:
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes)
def contains_point(self, point, transform=None):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return point_in_path(point[0], point[1], self, transform)
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from transforms import Bbox
if transform is not None:
transform = transform.frozen()
return Bbox(get_path_extents(self, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0):
"""
Convert this path to a list of polygons. Each polygon is an
Nx2 array of vertices. In other words, each polygon has no
``MOVETO`` instructions or curves. This is useful for
displaying in backends that do not support compound paths or
Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
if transform is None:
return [self.vertices]
else:
return [transform.transform(self.vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return convert_path_to_polygons(self, transform, width, height)
_unit_rectangle = None
#@classmethod
def unit_rectangle(cls):
"""
(staticmethod) Returns a :class:`Path` of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
return cls._unit_rectangle
unit_rectangle = classmethod(unit_rectangle)
_unit_regular_polygons = WeakValueDictionary()
#@classmethod
def unit_regular_polygon(cls, numVertices):
"""
(staticmethod) Returns a :class:`Path` for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
path = Path(verts)
cls._unit_regular_polygons[numVertices] = path
return path
unit_regular_polygon = classmethod(unit_regular_polygon)
_unit_regular_stars = WeakValueDictionary()
#@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
(staticmethod) Returns a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
path = Path(verts)
cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
unit_regular_star = classmethod(unit_regular_star)
#@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
(staticmethod) Returns a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
unit_regular_asterisk = classmethod(unit_regular_asterisk)
_unit_circle = None
#@classmethod
def unit_circle(cls):
"""
(staticmethod) Returns a :class:`Path` of the unit circle.
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle is None:
MAGIC = 0.2652031
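# (Explanatory aside, not in the original source: 0.2652031 is very close to
# 4/3 * tan(pi/16) ~= 0.265216, the standard cubic Bezier control-point offset
# for a 1/8-circle arc.)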
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(26)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle = Path(vertices, codes)
return cls._unit_circle
unit_circle = classmethod(unit_circle)
#@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
(staticmethod) Returns an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
Maisonobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
if (theta2 - theta1 > np.pi) and (eta2 - eta1 < np.pi):
eta2 += twopi
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [Path.MOVETO, Path.LINETO]
codes[-2:] = [Path.LINETO, Path.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = Path.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return Path(vertices, codes)
arc = classmethod(arc)
#@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
        (classmethod) Returns a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
wedge = classmethod(wedge)
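    # Usage sketch (not in the original source): ``Path.arc(0, 90)`` returns a
    # cubic-Bezier approximation of a quarter circle, while
    # ``Path.wedge(0, 90)`` additionally closes the path through the origin so
    # it can be filled as a pie slice.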
_get_path_collection_extents = get_path_collection_extents
def get_path_collection_extents(*args):
"""
Given a sequence of :class:`Path` objects, returns the bounding
box that encapsulates all of them.
"""
from transforms import Bbox
if len(args[1]) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_get_path_collection_extents(*args))
| agpl-3.0 |
kipohl/ncanda-data-integration | scripts/redcap/scoring/pgd/__init__.py | 2 | 2426 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
import pandas
import RwrapperNew
#
# Variables from surveys needed for PGD
#
# LimeSurvey field names
lime_fields = [ "PGD_sec1 [pgd1]", "PGD_sec1 [pgd2]", "PGD_sec1 [pgd3]", "PGD_sec1 [pgd4]", "PGD_sec1 [pgd5]", "PGD_sec1 [pgd6]", "PGD_sec2 [pgd7]",
"PGD_sec2 [pgd8]", "PGD_sec2 [pgd9]", "PGD_sec2 [pgd10]", "PGD_sec2 [pgd11]", "PGD_sec2 [pgd12]" ]
# Dictionary to recover LimeSurvey field names from REDCap names
rc2lime = dict()
for field in lime_fields:
rc2lime[RwrapperNew.label_to_sri( 'youthreport2', field )] = field
# REDCap fields names
input_fields = { 'mrireport' : [ 'youth_report_2_complete', 'youthreport2_missing' ] + rc2lime.keys() }
#
# This determines the name of the form in REDCap where the results are posted.
#
output_form = 'clinical'
#
# PGD field names mapping from R to REDCap
#
R2rc = { 'PGD.SUM' : 'pgd_score' }
#
# Scoring function - take requested data (as requested by "input_fields") for each (subject,event), and demographics (date of birth, gender) for each subject.
#
def compute_scores( data, demographics ):
# Get rid of all records that don't have YR2
    data = data.dropna(subset=['youth_report_2_complete'])
data = data[ data['youth_report_2_complete'] > 0 ]
data = data[ ~(data['youthreport2_missing'] > 0) ]
# If no records to score, return empty DF
if len( data ) == 0:
return pandas.DataFrame()
# Replace all column labels with the original LimeSurvey names
data.columns = RwrapperNew.map_labels( data.columns, rc2lime )
# Call the scoring function for all table rows
scores = data.apply( RwrapperNew.runscript, axis=1, Rscript='pgd/PGD.R' )
# Replace all score columns with REDCap field names
scores.columns = RwrapperNew.map_labels( scores.columns, R2rc )
# Simply copy completion status from the input surveys
scores['pgd_complete'] = data['youth_report_2_complete'].map( int )
# Make a proper multi-index for the scores table
scores.index = pandas.MultiIndex.from_tuples(scores.index)
scores.index.names = ['study_id', 'redcap_event_name']
# Return the computed scores - this is what will be imported back into REDCap
outfield_list = [ 'pgd_complete' ] + R2rc.values()
return scores[ outfield_list ]
| bsd-3-clause |
chrisburr/scikit-learn | examples/linear_model/plot_ard.py | 18 | 2827 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD).
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
CVML/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
ai-se/XTREE | src/Planners/XTREE/Prediction.py | 1 | 8693 | from __future__ import division
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from scipy.stats.mstats import mode
from scipy.spatial.distance import euclidean
from numpy import mean
from random import choice, uniform as rand
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote import *
import pandas as pd
from tools.axe.abcd import _Abcd
from methods1 import *
from tools.sk import rdivDemo
def formatData(tbl):
Rows = [i.cells for i in tbl._rows]
headers = [i.name for i in tbl.headers]
return pd.DataFrame(Rows, columns=headers)
def Bugs(tbl):
cells = [i.cells[-2] for i in tbl._rows]
return cells
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PREDICTION SYSTEMS:
# ```````````````````
# 1. WHERE2 2. RANDOM FORESTS, 3. DECISION TREES, 4. ADABOOST,
# 5. LOGISTIC REGRESSION
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def where2prd(train, test, tunings=[None, None], smoteit=False, thresh=1):
"WHERE2"
def flatten(x):
"""
Takes an N times nested list of list like [[a,b],[c, [d, e]],[f]]
and returns a single list [a,b,c,d,e,f]
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def leaves(node):
"""
Returns all terminal nodes.
"""
L = []
if len(node.kids) > 1:
for l in node.kids:
L.extend(leaves(l))
return L
elif len(node.kids) == 1:
return [node.kids]
else:
return [node]
train_DF = createTbl(
train,
settings=tunings[0],
_smote=False,
isBin=True,
bugThres=2)
test_df = createTbl(test)
t = discreteNums(train_DF, map(lambda x: x.cells, train_DF._rows))
myTree = tdiv(t, opt=tunings[1])
testCase = test_df._rows
rows, preds = [], []
for tC in testCase:
newRow = tC
loc = drop(tC, myTree) # Drop a test case in the tree & see where it lands
leafNodes = flatten(leaves(loc))
# set_trace()
rows = [leaf.rows for leaf in leafNodes][0]
vals = [r.cells[-2] for r in rows]
    preds.append(0 if mean(vals) < thresh else 1)
# if median(vals) > 0 else preds.extend([0])
return preds
def _where2pred():
"Test where2"
dir = '../Data'
one, two = explore(dir)
# set_trace()
# Training data
train = one[0][:-1]
# Test data
test = [one[0][-1]]
actual = Bugs(createTbl(test, isBin=True))
preds = where2prd(train, test)
# for a, b in zip(actual, preds): print a, b
# set_trace()
return _Abcd(before=actual, after=preds, show=False)[-1]
def rforest(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = RandomForestClassifier(n_estimators=100, random_state=1)
else:
clf = RandomForestClassifier(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def rforest2(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = RandomForestRegressor(n_estimators=100, random_state=1)
else:
clf = RandomForestRegressor(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def _RF():
"Test RF"
dir = '../Data'
one, two = explore(dir)
# Training data
train_DF = createTbl([one[0][0]])
# Test data
test_df = createTbl([one[0][1]])
actual = Bugs(test_df)
  # The original keyword arguments (mss, msl, max_feat, n_est) do not match
  # rforest()'s signature; hyper-parameters go through `tunings`
  # [n_estimators, max_features (%), min_samples_leaf, min_samples_split].
  # Here we simply fall back to the default forest.
  preds = rforest(train_DF, test_df, smoteit=False)
print _Abcd(before=actual, after=preds, show=False)[-1]
def CART(train, test, tunings=None, smoteit=True, duplicate=True):
" CART"
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = DecisionTreeClassifier()
else:
clf = DecisionTreeClassifier(max_depth=int(tunings[0]),
min_samples_split=int(tunings[1]),
min_samples_leaf=int(tunings[2]),
max_features=float(tunings[3] / 100),
max_leaf_nodes=int(tunings[4]),
criterion='entropy')
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features].astype('float32'), klass.astype('float32'))
preds = clf.predict(test_DF[test_DF.columns[:-2]].astype('float32')).tolist()
return preds
def _CART():
"Test CART"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = CART(train_DF, test_df)
set_trace()
  _Abcd(before=actual, after=preds, show=True)
def adaboost(train, test, smoteit=True):
"ADABOOST"
if smoteit:
train = SMOTE(train)
clf = AdaBoostClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _adaboost():
"Test AdaBoost"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = adaboost(train_DF, test_df)
set_trace()
  _Abcd(before=actual, after=preds, show=True)
def logit(train, test, smoteit=True):
"Logistic Regression"
if smoteit:
train = SMOTE(train)
clf = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _logit():
"Test LOGIT"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = logit(train_DF, test_df)
set_trace()
  _Abcd(before=actual, after=preds, show=True)
def knn(train, test, smoteit=True):
"kNN"
if smoteit:
train = SMOTE(train)
neigh = KNeighborsClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
neigh.fit(train_DF[features], klass)
preds = neigh.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
if __name__ == '__main__':
random.seed(0)
Dat = []
for _ in xrange(10):
print(_where2pred())
# Dat.insert(0, 'Where2 untuned')
# rdivDemo([Dat])
| mit |
jviada/QuantEcon.py | examples/lqramsey.py | 4 | 9949 | """
Filename: lqramsey.py
Authors: Thomas Sargent, Doc-Jin Jang, Jeong-hun Choi, John Stachurski
This module provides code to compute Ramsey equilibria in a LQ economy with
distortionary taxation. The program computes allocations (consumption,
leisure), tax rates, revenues, the net present value of the debt and other
related quantities.
Functions for plotting the results are also provided below.
See the lecture at http://quant-econ.net/py/lqramsey.html for a description of
the model.
"""
import sys
import numpy as np
from numpy import sqrt, eye, dot, zeros, cumsum
from numpy.random import randn
import scipy.linalg
import matplotlib.pyplot as plt
from collections import namedtuple
from quantecon import nullspace, mc_sample_path, var_quadratic_sum
# == Set up a namedtuple to store data on the model economy == #
Economy = namedtuple('economy',
('beta', # Discount factor
'Sg', # Govt spending selector matrix
'Sd', # Exogenous endowment selector matrix
'Sb', # Utility parameter selector matrix
'Ss', # Coupon payments selector matrix
'discrete', # Discrete or continuous -- boolean
'proc')) # Stochastic process parameters
# == Set up a namedtuple to store return values for compute_paths() == #
Path = namedtuple('path',
('g', # Govt spending
'd', # Endowment
'b', # Utility shift parameter
's', # Coupon payment on existing debt
'c', # Consumption
'l', # Labor
'p', # Price
'tau', # Tax rate
'rvn', # Revenue
'B', # Govt debt
'R', # Risk free gross return
'pi', # One-period risk-free interest rate
'Pi', # Cumulative rate of return, adjusted
'xi')) # Adjustment factor for Pi
def compute_paths(T, econ):
"""
Compute simulated time paths for exogenous and endogenous variables.
Parameters
===========
T: int
Length of the simulation
econ: a namedtuple of type 'Economy', containing
beta - Discount factor
Sg - Govt spending selector matrix
Sd - Exogenous endowment selector matrix
Sb - Utility parameter selector matrix
Ss - Coupon payments selector matrix
discrete - Discrete exogenous process (True or False)
proc - Stochastic process parameters
Returns
========
path: a namedtuple of type 'Path', containing
g - Govt spending
d - Endowment
b - Utility shift parameter
s - Coupon payment on existing debt
c - Consumption
l - Labor
p - Price
tau - Tax rate
rvn - Revenue
B - Govt debt
R - Risk free gross return
pi - One-period risk-free interest rate
Pi - Cumulative rate of return, adjusted
xi - Adjustment factor for Pi
The corresponding values are flat numpy ndarrays.
"""
# == Simplify names == #
beta, Sg, Sd, Sb, Ss = econ.beta, econ.Sg, econ.Sd, econ.Sb, econ.Ss
if econ.discrete:
P, x_vals = econ.proc
else:
A, C = econ.proc
# == Simulate the exogenous process x == #
if econ.discrete:
state = mc_sample_path(P, init=0, sample_size=T)
x = x_vals[:, state]
else:
# == Generate an initial condition x0 satisfying x0 = A x0 == #
nx, nx = A.shape
x0 = nullspace((eye(nx) - A))
x0 = -x0 if (x0[nx-1] < 0) else x0
x0 = x0 / x0[nx-1]
# == Generate a time series x of length T starting from x0 == #
nx, nw = C.shape
x = zeros((nx, T))
w = randn(nw, T)
x[:, 0] = x0.T
for t in range(1, T):
x[:, t] = dot(A, x[:, t-1]) + dot(C, w[:, t])
# == Compute exogenous variable sequences == #
g, d, b, s = (dot(S, x).flatten() for S in (Sg, Sd, Sb, Ss))
# == Solve for Lagrange multiplier in the govt budget constraint == #
# In fact we solve for nu = lambda / (1 + 2*lambda). Here nu is the
# solution to a quadratic equation a(nu**2 - nu) + b = 0 where
# a and b are expected discounted sums of quadratic forms of the state.
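    # (Equivalently a*nu**2 - a*nu + b = 0, whose smaller root
    #  nu = (a - sqrt(a**2 - 4*a*b)) / (2*a) is taken below.)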
Sm = Sb - Sd - Ss
# == Compute a and b == #
if econ.discrete:
ns = P.shape[0]
F = scipy.linalg.inv(np.identity(ns) - beta * P)
a0 = 0.5 * dot(F, dot(Sm, x_vals).T**2)[0]
H = dot(Sb - Sd + Sg, x_vals) * dot(Sg - Ss, x_vals)
b0 = 0.5 * dot(F, H.T)[0]
a0, b0 = float(a0), float(b0)
else:
H = dot(Sm.T, Sm)
a0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0)
H = dot((Sb - Sd + Sg).T, (Sg + Ss))
b0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0)
# == Test that nu has a real solution before assigning == #
warning_msg = """
Hint: you probably set government spending too {}. Elect a {}
Congress and start over.
"""
disc = a0**2 - 4 * a0 * b0
if disc >= 0:
nu = 0.5 * (a0 - sqrt(disc)) / a0
else:
print("There is no Ramsey equilibrium for these parameters.")
print(warning_msg.format('high', 'Republican'))
sys.exit(0)
# == Test that the Lagrange multiplier has the right sign == #
if nu * (0.5 - nu) < 0:
print("Negative multiplier on the government budget constraint.")
print(warning_msg.format('low', 'Democratic'))
sys.exit(0)
# == Solve for the allocation given nu and x == #
Sc = 0.5 * (Sb + Sd - Sg - nu * Sm)
Sl = 0.5 * (Sb - Sd + Sg - nu * Sm)
c = dot(Sc, x).flatten()
l = dot(Sl, x).flatten()
p = dot(Sb - Sc, x).flatten() # Price without normalization
tau = 1 - l / (b - c)
rvn = l * tau
# == Compute remaining variables == #
if econ.discrete:
H = dot(Sb - Sc, x_vals) * dot(Sl - Sg, x_vals) - dot(Sl, x_vals)**2
temp = dot(F, H.T).flatten()
B = temp[state] / p
H = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten()
R = p / (beta * H)
temp = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten()
xi = p[1:] / temp[:T-1]
else:
H = dot(Sl.T, Sl) - dot((Sb - Sc).T, Sl - Sg)
L = np.empty(T)
for t in range(T):
L[t] = var_quadratic_sum(A, C, H, beta, x[:, t])
B = L / p
Rinv = (beta * dot(dot(Sb - Sc, A), x)).flatten() / p
R = 1 / Rinv
AF1 = dot(Sb - Sc, x[:, 1:])
AF2 = dot(dot(Sb - Sc, A), x[:, :T-1])
xi = AF1 / AF2
xi = xi.flatten()
pi = B[1:] - R[:T-1] * B[:T-1] - rvn[:T-1] + g[:T-1]
Pi = cumsum(pi * xi)
# == Prepare return values == #
path = Path(g=g,
d=d,
b=b,
s=s,
c=c,
l=l,
p=p,
tau=tau,
rvn=rvn,
B=B,
R=R,
pi=pi,
Pi=Pi,
xi=xi)
return path
def gen_fig_1(path):
"""
The parameter is the path namedtuple returned by compute_paths(). See
the docstring of that function for details.
"""
T = len(path.c)
# == Prepare axes == #
num_rows, num_cols = 2, 2
fig, axes = plt.subplots(num_rows, num_cols, figsize=(14, 10))
plt.subplots_adjust(hspace=0.4)
for i in range(num_rows):
for j in range(num_cols):
axes[i, j].grid()
axes[i, j].set_xlabel(r'Time')
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
# == Plot consumption, govt expenditure and revenue == #
ax = axes[0, 0]
ax.plot(path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(path.g, label=r'$g_t$', **p_args)
ax.plot(path.c, label=r'$c_t$', **p_args)
ax.legend(ncol=3, **legend_args)
# == Plot govt expenditure and debt == #
ax = axes[0, 1]
ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args)
ax.plot(list(range(1, T)), path.B[1:T], label=r'$B_{t+1}$', **p_args)
ax.legend(ncol=3, **legend_args)
# == Plot risk free return == #
ax = axes[1, 0]
ax.plot(list(range(1, T+1)), path.R - 1, label=r'$R_t - 1$', **p_args)
ax.legend(ncol=1, **legend_args)
# == Plot revenue, expenditure and risk free rate == #
ax = axes[1, 1]
ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args)
axes[1, 1].plot(list(range(1, T)), path.pi, label=r'$\pi_{t+1}$', **p_args)
ax.legend(ncol=3, **legend_args)
plt.show()
def gen_fig_2(path):
"""
The parameter is the path namedtuple returned by compute_paths(). See
the docstring of that function for details.
"""
T = len(path.c)
# == Prepare axes == #
num_rows, num_cols = 2, 1
fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 10))
plt.subplots_adjust(hspace=0.5)
    bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
# == Plot adjustment factor == #
ax = axes[0]
ax.plot(list(range(2, T+1)), path.xi, label=r'$\xi_t$', **p_args)
ax.grid()
ax.set_xlabel(r'Time')
ax.legend(ncol=1, **legend_args)
# == Plot adjusted cumulative return == #
ax = axes[1]
ax.plot(list(range(2, T+1)), path.Pi, label=r'$\Pi_t$', **p_args)
ax.grid()
ax.set_xlabel(r'Time')
ax.legend(ncol=1, **legend_args)
plt.show()
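# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module).  The parameter values
# below are illustrative assumptions, loosely following the accompanying
# lecture: government spending follows an AR(1) process around a mean of 0.35,
# and the remaining selector matrices are constants.  It simply exercises
# compute_paths() and gen_fig_1().
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    beta = 1 / 1.05
    rho, mg = 0.7, 0.35
    A = eye(2)
    A[0, :] = rho, mg * (1 - rho)            # state is (g_t, 1)
    C = np.zeros((2, 1))
    C[0, 0] = np.sqrt(1 - rho**2) * mg / 10  # innovation std. dev. (assumed)
    econ = Economy(beta=beta,
                   Sg=np.array((1, 0)).reshape(1, 2),      # g is the first state
                   Sd=np.array((0, 0)).reshape(1, 2),      # no exogenous endowment
                   Sb=np.array((0, 2.135)).reshape(1, 2),  # constant utility shifter
                   Ss=np.array((0, 0)).reshape(1, 2),      # no coupon payments
                   discrete=False,
                   proc=(A, C))
    path = compute_paths(T=50, econ=econ)
    gen_fig_1(path)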
| bsd-3-clause |
EachenKuang/PythonRepository | MedicineSCI/document_word.py | 1 | 3290 | # -*- encoding: utf-8 -*-
import numpy as np
import pandas as pd
def generate_documents_words_matrix():
f1 = open('TF.txt', 'r') # Documents
f2 = open('termList.txt', 'r') # Words
    # Word list (vocabulary)
words_list = []
for line in f2.readlines():
words_list.append(line.strip())
documents_words_matrix = pd.DataFrame(np.zeros((8283, len(words_list))), columns=words_list, index=range(1, 8284))
article_index = None
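    # Assumed layout of TF.txt (inferred from the parsing below): a line that
    # holds only a file path such as ...\123.txt starts a new article, and each
    # subsequent line is "<term> <frequency>" until the next path line.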
for line in f1.readlines():
splits = line.split()
if len(splits) == 1 and article_index is None:
article_index = int(line.split('\\')[-1].split('.')[0])
            article_words_frequency = [0] * len(words_list)  # initialise the term-frequency list for this article
print article_index
elif len(splits) == 1 and article_index is not None:
documents_words_matrix.loc[article_index,] = article_words_frequency
article_index = int(line.split('\\')[-1].split('.')[0])
            article_words_frequency = [0] * len(words_list)  # initialise the term-frequency list for this article
print article_index
else:
word = " ".join(splits[:-1])
frequency = int(splits[-1])
article_words_frequency[words_list.index(word)] = frequency
    # Flush the final article (the loop above only writes a row when the next
    # article header appears).
    if article_index is not None:
        documents_words_matrix.loc[article_index,] = article_words_frequency
    documents_words_matrix[documents_words_matrix > 0] = 1
documents_words_matrix.to_csv('documents_words_matrix_0_1.csv', header=True, index=True)
f1.close()
f2.close()
def multiply_document_word_matrix():
df = pd.read_csv('documents_words_matrix_0_1.csv', index_col=0)
result = np.matrix(df.T)*np.matrix(df)
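    # With the binarised document-word matrix, entry (i, j) of `result` is the
    # number of documents in which words i and j co-occur; the diagonal holds
    # each word's document frequency.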
pd.DataFrame(result).to_csv('result_0_1.csv')
def select_topic_from_matrix():
topic1 = open('topic8', 'r')
topic2 = open('topic6', 'r')
termList = open('termList.txt', 'r')
words_list = []
for line in termList.readlines():
words_list.append(line.strip())
df = pd.read_csv('result_0_1.csv', index_col=0)
print df.head(3)
topic1_index = []
for line in topic1:
word = " ".join(line.split()[:-1])
topic1_index.append(words_list.index(word))
topic2_index = []
for line in topic2:
word = " ".join(line.split()[:-1])
topic2_index.append(words_list.index(word))
print topic1_index
print topic2_index
words_list = np.array(words_list)
topic1_self_df = np.matrix(df.loc[topic1_index, np.array(topic1_index).astype(str)])
topic2_self_df = np.matrix(df.loc[topic2_index, np.array(topic2_index).astype(str)])
topic1_topic2_df = np.matrix(df.loc[topic1_index, np.array(topic2_index).astype(str)])
topic1_self_df = pd.DataFrame(topic1_self_df, index=words_list[topic1_index], columns=words_list[topic1_index])
topic2_self_df = pd.DataFrame(topic2_self_df, index=words_list[topic2_index], columns=words_list[topic2_index])
topic1_topic2_df = pd.DataFrame(topic1_topic2_df, index=words_list[topic1_index], columns=words_list[topic2_index])
topic1_self_df.to_csv('topic1.csv',index=True,header=True)
topic2_self_df.to_csv('topic2.csv',index=True,header=True)
topic1_topic2_df.to_csv('topic1_topic2.csv',index=True,header=True)
def main():
# generate_documents_words_matrix()
# multiply_document_word_matrix()
select_topic_from_matrix()
if __name__ == '__main__':
main() | apache-2.0 |
fabioticconi/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/streamplot.py | 10 | 20629 | """
Streamline plotting for 2D vector fields.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.lines as mlines
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None, zorder=None, start_points=None):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 30x30 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
        Scaling factor for the arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
*start_points*: Nx2 array
Coordinates of starting points for the streamlines.
In data coordinates, the same as the ``x`` and ``y`` arrays.
    *zorder* : int or None
        The zorder of the stream lines and arrows. If *None*, defaults to
        the zorder of :class:`matplotlib.lines.Line2D`.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
if zorder is None:
zorder = mlines.Line2D.zorder
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = axes._get_lines.get_next_color()
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
if color.shape != grid.shape:
msg = "If 'color' is given, must have the shape of 'Grid(x,y)'"
raise ValueError(msg)
line_colors = []
color = np.ma.masked_invalid(color)
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
if linewidth.shape != grid.shape:
msg = "If 'linewidth' is given, must have the shape of 'Grid(x,y)'"
raise ValueError(msg)
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
line_kw['zorder'] = zorder
arrow_kw['zorder'] = zorder
## Sanity checks.
if (u.shape != grid.shape) or (v.shape != grid.shape):
msg = "'u' and 'v' must be of shape 'Grid(x,y)'"
raise ValueError(msg)
u = np.ma.masked_invalid(u)
v = np.ma.masked_invalid(v)
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
if start_points is None:
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
else:
# Convert start_points from data to array coords
# Shift the seed points from the bottom left of the data so that
# data2grid works properly.
sp2 = np.asanyarray(start_points, dtype=np.float).copy()
sp2[:, 0] += np.abs(x[0])
sp2[:, 1] += np.abs(y[0])
for xs, ys in sp2:
xg, yg = dmap.data2grid(xs, ys)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = np.array(t[1]) * grid.dy + grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
# Add arrows half way along each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1] / 2.)
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.append(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
p = patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, **arrow_kw)
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(
streamlines, transform=transform, **line_kw)
lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width]
lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height]
if use_multicolor_lines:
lc.set_array(np.ma.hstack(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.autoscale_view()
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
# ========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
    * axes-coordinates go from 0 to 1 in the domain.
    * data-coordinates are specified by the input x-y coordinates.
    * grid-coordinates go from 0 to N and 0 to M for an N x M grid,
      where N and M match the shape of the input data.
    * mask-coordinates go from 0 to N and 0 to M for an N x M mask,
      where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
# Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = float(mask.nx - 1) / grid.nx
self.y_grid2mask = float(mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = grid.nx / grid.width
self.y_data2grid = grid.ny / grid.height
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return (int((xi * self.x_grid2mask) + 0.5),
int((yi * self.y_grid2mask) + 0.5))
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if x.ndim == 1:
pass
elif x.ndim == 2:
x_row = x[0, :]
if not np.allclose(x_row, x):
raise ValueError("The rows of 'x' must be equal")
x = x_row
else:
raise ValueError("'x' can have at maximum 2 dimensions")
if y.ndim == 1:
pass
elif y.ndim == 2:
y_col = y[:, 0]
if not np.allclose(y_col, y.T):
raise ValueError("The columns of 'y' must be equal")
y = y_col
else:
raise ValueError("'y' can have at maximum 2 dimensions")
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
if density <= 0:
raise ValueError("If a scalar, 'density' must be positive")
self.nx = self.ny = int(30 * density)
else:
if len(density) != 2:
raise ValueError("'density' can have at maximum 2 dimensions")
self.nx = int(30 * density[0])
self.ny = int(30 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
raise InvalidIndexError
class InvalidIndexError(Exception):
pass
class TerminateTrajectory(Exception):
pass
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
dmap.start_trajectory(x0, y0)
sf, xf_traj, yf_traj = _integrate_rk12(x0, y0, dmap, forward_time)
dmap.reset_start_point(x0, y0)
sb, xb_traj, yb_traj = _integrate_rk12(x0, y0, dmap, backward_time)
# combine forward and backward trajectories
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if stotal > minlength:
return x_traj, y_traj
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
def _integrate_rk12(x0, y0, dmap, f):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
# This error is below that needed to match the RK4 integrator. It
# is set for visual reasons -- too low and corners start
# appearing ugly and jagged. Can be tuned.
maxerror = 0.003
# This limit is important (for all integrators) to avoid the
# trajectory skipping some mask cells. We could relax this
# condition if we use the code which is commented out below to
# increment the location gradually. However, due to the efficient
# nature of the interpolation, this doesn't boost speed by much
# for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi)
except InvalidIndexError:
break
if (stotal + ds) > 2:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
# ========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(np.int)
y = yi.astype(np.int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = np.int(xi)
y = np.int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
def _gen_starting_points(shape):
"""Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
i = 0
direction = 'right'
for i in xrange(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
| apache-2.0 |
jakobworldpeace/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 41 | 2672 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
kevin-coder/tensorflow-fork | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 39 | 20233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
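    # true_transform holds the squared Euclidean distance to each cluster, expanded
    # as ||x||^2 - 2 x.c + ||c||^2 and clipped at zero to guard against round-off.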
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
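    # With cosine distance each point contributes (1 - cosine similarity to its
    # center), so the total score is num_points minus the tensordot of the
    # normalized points with their assigned (unit-norm) centers.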
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
samzhang111/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
    separator : string, optional
Separator string used when constructing new features for one-hot
coding.
    sparse : boolean, optional
Whether transform should produce scipy.sparse matrices.
True by default.
    sort : boolean, optional
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
        # collect all the possible feature names and build the sparse matrix at
        # the same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
thekerrlab/netpyne | netpyne/analysis/lfp.py | 1 | 16820 | """
analysis/lfp.py
Functions to plot and analyze LFP-related results
Contributors: salvadordura@gmail.com
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import range
from builtins import round
from builtins import str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from netpyne import __gui__
if __gui__:
import matplotlib.pyplot as plt
from matplotlib import mlab
import numpy as np
from numbers import Number
from .utils import colorList, exception, _saveFigData, _showFigure, _smooth1d
# -------------------------------------------------------------------------------------------------------------------
## Plot LFP (time-resolved, power spectral density, time-frequency and 3D locations)
# -------------------------------------------------------------------------------------------------------------------
@exception
def plotLFP (electrodes = ['avg', 'all'], plots = ['timeSeries', 'PSD', 'spectrogram', 'locations'], timeRange=None, NFFT=256, noverlap=128,
nperseg=256, minFreq=1, maxFreq=100, stepFreq=1, smooth=0, separation=1.0, includeAxon=True, logx=False, logy=False, norm=False, dpi=200, overlay=False, filtFreq = False, filtOrder=3, detrend=False, specType='morlet', fontSize=14, colors = None, maxPlots=8, lineWidth=1.5, figSize = (8,8), saveData = None, saveFig = None, showFig = True):
'''
Plot LFP
- electrodes (list): List of electrodes to include; 'avg'=avg of all electrodes; 'all'=each electrode separately (default: ['avg', 'all'])
        - plots (list): list of plot types to show (default: ['timeSeries', 'PSD', 'spectrogram', 'locations'])
        - timeRange ([start:stop]): Time range of LFP shown; if None shows all (default: None)
- NFFT (int, power of 2): Number of data points used in each block for the PSD and time-freq FFT (default: 256)
- noverlap (int, <nperseg): Number of points of overlap between segments for PSD and time-freq (default: 128)
- minFreq (float)
- maxFreq (float): Maximum frequency shown in plot for PSD and time-freq (default: 100 Hz)
- stepFreq (float)
- nperseg (int): Length of each segment for time-freq (default: 256)
- smooth (int): Window size for smoothing LFP; no smoothing if 0 (default: 0)
- separation (float): Separation factor between time-resolved LFP plots; multiplied by max LFP value (default: 1.0)
- includeAxon (boolean): Whether to show the axon in the location plot (default: True)
- logx (boolean)
- logy (boolean)
- norm (boolean)
- filtFreq (float)
- filtOrder (int)
        - detrend (boolean)
- specType ('morlet'|'fft')
- overlay (boolean)
- dpi (int)
- colors
- maxPlots
- lineWidth
        - figSize ((width, height)): Size of figure (default: (8,8))
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure;
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handles
'''
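    # Example call (hypothetical values; assumes a completed simulation with LFP
    # recording enabled so that sim.allSimData['LFP'] is populated):
    #   figs, data = sim.analysis.plotLFP(electrodes=['avg', 0, 1],
    #       plots=['timeSeries', 'PSD'], timeRange=[100, 1000], maxFreq=80,
    #       saveFig=True, showFig=False)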
from .. import sim
from ..support.scalebar import add_scalebar
print('Plotting LFP ...')
if not colors: colors = colorList
# set font size
plt.rcParams.update({'font.size': fontSize})
# time range
if timeRange is None:
timeRange = [0,sim.cfg.duration]
lfp = np.array(sim.allSimData['LFP'])[int(timeRange[0]/sim.cfg.recordStep):int(timeRange[1]/sim.cfg.recordStep),:]
if filtFreq:
from scipy import signal
fs = 1000.0/sim.cfg.recordStep
nyquist = fs/2.0
if isinstance(filtFreq, list): # bandpass
Wn = [filtFreq[0]/nyquist, filtFreq[1]/nyquist]
b, a = signal.butter(filtOrder, Wn, btype='bandpass')
elif isinstance(filtFreq, Number): # lowpass
Wn = filtFreq/nyquist
b, a = signal.butter(filtOrder, Wn)
for i in range(lfp.shape[1]):
lfp[:,i] = signal.filtfilt(b, a, lfp[:,i])
if detrend:
from scipy import signal
for i in range(lfp.shape[1]):
lfp[:,i] = signal.detrend(lfp[:,i])
if norm:
for i in range(lfp.shape[1]):
offset = min(lfp[:,i])
if offset <= 0:
lfp[:,i] += abs(offset)
lfp[:,i] /= max(lfp[:,i])
# electrode selection
if 'all' in electrodes:
electrodes.remove('all')
electrodes.extend(list(range(int(sim.net.recXElectrode.nsites))))
# plotting
figs = []
#maxPlots = 8.0
data = {'lfp': lfp} # returned data
# time series -----------------------------------------
if 'timeSeries' in plots:
ydisp = np.absolute(lfp).max() * separation
offset = 1.0*ydisp
t = np.arange(timeRange[0], timeRange[1], sim.cfg.recordStep)
if figSize:
figs.append(plt.figure(figsize=figSize))
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
color = 'k'
lw=1.0
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
color = colors[i%len(colors)]
lw=1.0
plt.plot(t, -lfpPlot+(i*ydisp), color=color, linewidth=lw)
if len(electrodes) > 1:
plt.text(timeRange[0]-0.07*(timeRange[1]-timeRange[0]), (i*ydisp), elec, color=color, ha='center', va='top', fontsize=fontSize, fontweight='bold')
ax = plt.gca()
data['lfpPlot'] = lfpPlot
data['ydisp'] = ydisp
data['t'] = t
# format plot
if len(electrodes) > 1:
            plt.text(timeRange[0]-0.14*(timeRange[1]-timeRange[0]), (len(electrodes)*ydisp)/2.0, 'LFP electrode', color='k', ha='left', va='bottom', fontsize=fontSize, rotation=90)
plt.ylim(-offset, (len(electrodes))*ydisp)
else:
            plt.suptitle('LFP Signal', fontsize=fontSize, fontweight='bold')
ax.invert_yaxis()
plt.xlabel('time (ms)', fontsize=fontSize)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.subplots_adjust(bottom=0.1, top=1.0, right=1.0)
# calculate scalebar size and add scalebar
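        # round_to_n(x, n, m) rounds x to n significant figures and then up to the
        # nearest multiple of m; it is used below to pick a tidy scalebar length.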
round_to_n = lambda x, n, m: int(np.ceil(round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1)) / m)) * m
scaley = 1000.0 # values in mV but want to convert to uV
m = 10.0
sizey = 100/scaley
while sizey > 0.25*ydisp:
try:
sizey = round_to_n(0.2*ydisp*scaley, 1, m) / scaley
except:
sizey /= 10.0
m /= 10.0
        labely = '%.3g $\mu$V'%(sizey*scaley)
if len(electrodes) > 1:
add_scalebar(ax,hidey=True, matchy=False, hidex=False, matchx=False, sizex=0, sizey=-sizey, labely=labely, unitsy='$\mu$V', scaley=scaley,
loc=3, pad=0.5, borderpad=0.5, sep=3, prop=None, barcolor="black", barwidth=2)
else:
add_scalebar(ax, hidey=True, matchy=False, hidex=True, matchx=True, sizex=None, sizey=-sizey, labely=labely, unitsy='$\mu$V', scaley=scaley,
unitsx='ms', loc=3, pad=0.5, borderpad=0.5, sep=3, prop=None, barcolor="black", barwidth=2)
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp.png'
plt.savefig(filename, dpi=dpi)
# PSD ----------------------------------
if 'PSD' in plots:
if overlay:
figs.append(plt.figure(figsize=figSize))
else:
numCols = 1# np.round(len(electrodes) / maxPlots) + 1
figs.append(plt.figure(figsize=(figSize[0]*numCols, figSize[1])))
#import seaborn as sb
allFreqs = []
allSignal = []
data['allFreqs'] = allFreqs
data['allSignal'] = allSignal
for i,elec in enumerate(electrodes):
if not overlay:
plt.subplot(np.ceil(len(electrodes)/numCols), numCols,i+1)
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
color = 'k'
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
color = colors[i%len(colors)]
Fs = int(1000.0/sim.cfg.recordStep)
power = mlab.psd(lfpPlot, Fs=Fs, NFFT=NFFT, detrend=mlab.detrend_none, window=mlab.window_hanning,
noverlap=noverlap, pad_to=None, sides='default', scale_by_freq=None)
if smooth:
signal = _smooth1d(10*np.log10(power[0]), smooth)
else:
signal = 10*np.log10(power[0])
freqs = power[1]
allFreqs.append(freqs)
allSignal.append(signal)
plt.plot(freqs[freqs<maxFreq], signal[freqs<maxFreq], linewidth=lineWidth, color=color, label='Electrode %s'%(str(elec)))
plt.xlim([0, maxFreq])
if len(electrodes) > 1 and not overlay:
plt.title('Electrode %s'%(str(elec)), fontsize=fontSize)
plt.ylabel('dB/Hz', fontsize=fontSize)
# ALTERNATIVE PSD CALCULATION USING WELCH
# from http://joelyancey.com/lfp-python-practice/
# from scipy import signal as spsig
# Fs = int(1000.0/sim.cfg.recordStep)
# maxFreq=100
# f, psd = spsig.welch(lfpPlot, Fs, nperseg=100)
# plt.semilogy(f,psd,'k')
# sb.despine()
# plt.xlim((0,maxFreq))
# plt.yticks(size=fontsiz)
# plt.xticks(size=fontsiz)
# plt.ylabel('$uV^{2}/Hz$',size=fontsiz)
# format plot
plt.xlabel('Frequency (Hz)', fontsize=fontSize)
if overlay:
plt.legend(fontsize=fontSize)
plt.tight_layout()
plt.suptitle('LFP Power Spectral Density', fontsize=fontSize, fontweight='bold') # add yaxis in opposite side
plt.subplots_adjust(bottom=0.08, top=0.92)
if logx:
pass
#from IPython import embed; embed()
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp_psd.png'
plt.savefig(filename, dpi=dpi)
# Spectrogram ------------------------------
if 'spectrogram' in plots:
import matplotlib.cm as cm
numCols = 1 #np.round(len(electrodes) / maxPlots) + 1
figs.append(plt.figure(figsize=(figSize[0]*numCols, figSize[1])))
#t = np.arange(timeRange[0], timeRange[1], sim.cfg.recordStep)
if specType == 'morlet':
from ..support.morlet import MorletSpec, index2ms
spec = []
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
fs = int(1000.0 / sim.cfg.recordStep)
t_spec = np.linspace(0, index2ms(len(lfpPlot), fs), len(lfpPlot))
spec.append(MorletSpec(lfpPlot, fs, freqmin=minFreq, freqmax=maxFreq, freqstep=stepFreq))
f = np.array(range(minFreq, maxFreq+1, stepFreq)) # only used as output for user
vmin = np.array([s.TFR for s in spec]).min()
vmax = np.array([s.TFR for s in spec]).max()
for i,elec in enumerate(electrodes):
plt.subplot(np.ceil(len(electrodes) / numCols), numCols, i + 1)
T = timeRange
F = spec[i].f
if norm:
spec[i].TFR = spec[i].TFR / vmax
S = spec[i].TFR
vc = [0, 1]
else:
S = spec[i].TFR
vc = [vmin, vmax]
plt.imshow(S, extent=(np.amin(T), np.amax(T), np.amin(F), np.amax(F)), origin='lower', interpolation='None', aspect='auto', vmin=vc[0], vmax=vc[1], cmap=plt.get_cmap('viridis'))
plt.colorbar(label='Power')
plt.ylabel('Hz')
plt.tight_layout()
if len(electrodes) > 1:
plt.title('Electrode %s' % (str(elec)), fontsize=fontSize - 2)
elif specType == 'fft':
from scipy import signal as spsig
spec = []
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
# creates spectrogram over a range of data
# from: http://joelyancey.com/lfp-python-practice/
fs = int(1000.0/sim.cfg.recordStep)
f, t_spec, x_spec = spsig.spectrogram(lfpPlot, fs=fs, window='hanning',
detrend=mlab.detrend_none, nperseg=nperseg, noverlap=noverlap, nfft=NFFT, mode='psd')
x_mesh, y_mesh = np.meshgrid(t_spec*1000.0, f[f<maxFreq])
spec.append(10*np.log10(x_spec[f<maxFreq]))
vmin = np.array(spec).min()
vmax = np.array(spec).max()
for i,elec in enumerate(electrodes):
plt.subplot(np.ceil(len(electrodes)/numCols), numCols, i+1)
plt.pcolormesh(x_mesh, y_mesh, spec[i], cmap=cm.viridis, vmin=vmin, vmax=vmax)
plt.colorbar(label='dB/Hz', ticks=[np.ceil(vmin), np.floor(vmax)])
if logy:
plt.yscale('log')
plt.ylabel('Log-frequency (Hz)')
if isinstance(logy, list):
yticks = tuple(logy)
plt.yticks(yticks, yticks)
else:
plt.ylabel('(Hz)')
if len(electrodes) > 1:
plt.title('Electrode %s'%(str(elec)), fontsize=fontSize-2)
plt.xlabel('time (ms)', fontsize=fontSize)
plt.tight_layout()
plt.suptitle('LFP spectrogram', size=fontSize, fontweight='bold')
plt.subplots_adjust(bottom=0.08, top=0.90)
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp_timefreq.png'
plt.savefig(filename, dpi=dpi)
# locations ------------------------------
if 'locations' in plots:
cvals = [] # used to store total transfer resistance
for cell in sim.net.compartCells:
trSegs = list(np.sum(sim.net.recXElectrode.getTransferResistance(cell.gid)*1e3, axis=0)) # convert from Mohm to kilohm
if not includeAxon:
i = 0
for secName, sec in cell.secs.items():
nseg = sec['hObj'].nseg #.geom.nseg
if 'axon' in secName:
for j in range(i,i+nseg): del trSegs[j]
i+=nseg
cvals.extend(trSegs)
includePost = [c.gid for c in sim.net.compartCells]
fig = sim.analysis.plotShape(includePost=includePost, showElectrodes=electrodes, cvals=cvals, includeAxon=includeAxon, dpi=dpi,
fontSize=fontSize, saveFig=saveFig, showFig=showFig, figSize=figSize)[0]
figs.append(fig)
outputData = {'LFP': lfp, 'electrodes': electrodes, 'timeRange': timeRange, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}
if 'PSD' in plots:
outputData.update({'allFreqs': allFreqs, 'allSignal': allSignal})
if 'spectrogram' in plots:
outputData.update({'spec': spec, 't': t_spec*1000.0, 'freqs': f[f<=maxFreq]})
#save figure data
if saveData:
figData = outputData
_saveFigData(figData, saveData, 'lfp')
# show fig
if showFig: _showFigure()
return figs, outputData | mit |
peraktong/Cannon-Experiment | 0228_opt_simultaneously_1_data.py | 1 | 4384 | import os
import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
import astropy.io.fits as ts
import AnniesLasso_2 as tc
import math
def get_pixmask(flux, err):
bad_flux = ~np.isfinite(flux)
bad_err = (~np.isfinite(err)) | (err <= 0)
bad_pixels = bad_err | bad_flux
return bad_pixels
# Train the Cannon model on the reference (training) set
pkl_file = open('wl.pkl', 'rb')
wl = pickle.load(pkl_file)
pkl_file.close()
training_set_path = "/Users/caojunzhi/Desktop/NYU/Laboratory/task 2016.8.1-12.23/My codes/Cannon Experiment python 3.5/reference_labels.csv"
training_set_spectrum_dir = "/Users/caojunzhi/Desktop/NYU/Laboratory/task 2016.8.1-12.23/My codes/Cannon Experiment python 3.5/Data/"
training_set = Table.read("reference_labels.csv")
N = len(training_set)
keep = np.ones(N, dtype=bool)
training_set_flux = []
training_set_ivar = []
training_set_error = []
for i, row in enumerate(training_set):
image_path = os.path.join(training_set_spectrum_dir, row["ID"])
if not os.path.exists(image_path):
print("{}/{} could not be found: {}".format(i + 1, N, image_path))
keep[i] = False
continue
print("{}/{}: {}".format(i + 1, N, image_path))
image = fits.open(image_path)
flux = image[1].data
flux_err = image[2].data
badpix = get_pixmask(flux, flux_err)
ivar = 1.0 / flux_err ** 2
error = flux_err
# badpix is a array and the length is 8575
flux[badpix] = 1.0
ivar[badpix] = 0.0
training_set_flux.append(flux)
training_set_ivar.append(ivar)
training_set_error.append(error)
training_set_flux = np.array(training_set_flux)
training_set_ivar = np.array(training_set_ivar)
training_set_error = np.array(training_set_error)
assert all(keep)
# Construct model.
model = tc.L1RegularizedCannonModel(
training_set, training_set_flux, training_set_ivar, threads=8)
model.s2 = 0
model.regularization = 0
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(training_set,
tc.vectorizer.polynomial.terminator((
"Teff_{corr}",
"logg_{corr}",
"[M/H]_{corr}"),
2))
model.train()
pkl_file = open('n_900_path_fits.pkl', 'rb')
path_fits = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open('n_900_path_flux.pkl', 'rb')
path_flux = pickle.load(pkl_file)
pkl_file.close()
# mean_ivar
pkl_file = open('n_900_mean_ivar.pkl', 'rb')
mi = pickle.load(pkl_file)
pkl_file.close()
N = len(path_fits)
# obtain theta:
theta = model.theta
# Set the boundary to be 0
one = 0 * np.ones(len(theta[0, :]))
row = len(theta[:, 0])
# x
theta_x = np.vstack((theta, one))
theta_x = theta_x[1:row + 1, :]
# y
theta_y = theta
# z
theta_z = np.vstack((one, theta))
theta_z = theta_z[0, :row]
for i in range(0,N):
star = fits.open(path_fits[i])
flux = star[0].data
ivar = star[1].data
parameters = star[4].data[:,0:3]
inf_labels = star[9].data
#initial data
initial_labels = np.hstack((inf_labels, parameters))
n_i = len(flux[:,0])
result = []
inf_flux_sim = []
for j in range(0,n_i):
flux_j = np.atleast_2d(flux[j,:])
ivar_j = np.atleast_2d(ivar[j,:])
#initial_j = np.atleast_2d(initial_labels[j,:])
initial_j = np.atleast_2d([4678.85,1.980,-1.385,0,1,0])
label_6,un = model.fit_opt(flux_j, ivar_j, initial_labels=initial_j)
# new inferred flux:
label_6 = np.ravel(label_6)
a = label_6[3]
b = label_6[4]
c = label_6[5]
inf_labels_sim = label_6[0:3]
v_sim = model.vectorizer.get_label_vector(inf_labels_sim)
inf_flux = a*np.dot(v_sim, theta_x.T)+b*np.dot(v_sim, theta_y.T)+c*np.dot(v_sim, theta_z.T)
result.append(label_6)
inf_flux = np.ravel(inf_flux)
inf_flux_sim.append(inf_flux)
result = np.array(result)
inf_flux_sim = np.array(inf_flux_sim)
print("saving %d"%(i+1))
ts.append(path_fits[i], result)
ts.append(path_fits[i], inf_flux_sim)
| mit |
shikhardb/scikit-learn | sklearn/linear_model/coordinate_descent.py | 13 | 73434 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at implementing this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
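        # The coordinate descent solvers expect penalties pre-scaled by n_samples,
        # matching the 1 / (2 * n_samples) factor in the objective.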
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
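# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal example of the ``warm_start`` behaviour documented in the ElasticNet
# docstring above: refitting the same estimator along a decreasing grid of alphas so
# that each fit starts from the previous solution.  The synthetic data and the alpha
# grid are arbitrary assumptions chosen only for illustration; the public
# ``sklearn.linear_model`` API is imported at call time.
def _example_elasticnet_warm_start():
    import numpy as np
    from sklearn.linear_model import ElasticNet

    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] - 2 * X[:, 3] + 0.1 * rng.randn(50)

    enet = ElasticNet(l1_ratio=0.5, warm_start=True)
    coefs = []
    for alpha in [1.0, 0.5, 0.1, 0.01]:          # decreasing regularization
        enet.set_params(alpha=alpha)
        enet.fit(X, y)                           # reuses coef_ from the previous fit
        coefs.append(enet.coef_.copy())
    return coefs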
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
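# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The Lasso docstring above notes that Lasso optimizes the same objective as
# ElasticNet with ``l1_ratio=1.0``.  This sketch checks that claim numerically on a
# small synthetic problem; the data and the alpha value are arbitrary assumptions.
def _example_lasso_equals_elasticnet_l1():
    import numpy as np
    from sklearn.linear_model import Lasso, ElasticNet

    rng = np.random.RandomState(42)
    X = rng.randn(40, 5)
    y = rng.randn(40)

    lasso = Lasso(alpha=0.1).fit(X, y)
    enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)
    # Both estimators follow the same solver path, so the coefficients should agree
    # up to numerical tolerance.
    return np.allclose(lasso.coef_, enet.coef_, atol=1e-6)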
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
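# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A simplified, public-API version of the idea behind ``_path_residuals`` above:
# fit a regularization path on the training indices and measure the mean squared
# error of each model on the held-out indices.  Centering/normalization, intercept
# handling and the Gram-matrix logic of the real helper are deliberately omitted;
# the data, alpha grid and train/test split are arbitrary assumptions.
def _example_path_mse(alphas=(1.0, 0.1, 0.01)):
    import numpy as np
    from sklearn.linear_model import lasso_path

    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] + 0.5 * X[:, 2] + 0.1 * rng.randn(60)
    train, test = np.arange(0, 40), np.arange(40, 60)

    alphas = np.asarray(alphas)
    _, coefs, _ = lasso_path(X[train], y[train], alphas=alphas)
    # coefs has shape (n_features, n_alphas); one MSE per alpha on the test fold.
    residues = X[test].dot(coefs) - y[test][:, np.newaxis]
    return (residues ** 2).mean(axis=0)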
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
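# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The ``fit`` method above flattens a double loop (over l1_ratio values and CV folds)
# into a single generator of delayed calls so that both axes can be parallelized at
# once.  This standalone sketch reproduces that pattern with a trivial, purely
# hypothetical ``score`` function standing in for ``_path_residuals``; the standalone
# ``joblib`` package is assumed to be available, which may differ from the joblib
# copy imported by this module.
def _example_folded_double_loop(n_jobs=1):
    from joblib import Parallel, delayed

    def score(l1_ratio, fold):          # hypothetical stand-in for _path_residuals
        return l1_ratio * 10 + fold

    l1_ratios = [0.1, 0.5, 0.9]
    folds = [0, 1, 2]
    jobs = (delayed(score)(this_l1_ratio, fold)
            for this_l1_ratio in l1_ratios
            for fold in folds)
    results = Parallel(n_jobs=n_jobs)(jobs)
    # Results arrive l1_ratio-major, matching the reshape to
    # (n_l1_ratio, n_folds, ...) performed in fit().
    return results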
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
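# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal usage of LassoCV as described in the docstring above: the alpha grid is
# generated automatically, each alpha is scored by cross-validation, and the model
# is refit on the full data with the selected ``alpha_``.  The synthetic data and
# the fold count are arbitrary assumptions.
def _example_lassocv():
    import numpy as np
    from sklearn.linear_model import LassoCV

    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    y = X[:, 0] - 3 * X[:, 5] + 0.1 * rng.randn(100)

    model = LassoCV(cv=3).fit(X, y)
    # alpha_ holds the cross-validated choice; mse_path_ the per-fold errors.
    return model.alpha_, model.mse_path_.shape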
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets, n_features)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
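# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Worked example of the reparametrization described in the Notes above: choosing
# separate L1 and L2 penalty strengths ``a`` and ``b`` and converting them into the
# (alpha, l1_ratio) pair expected by ElasticNet / ElasticNetCV.  The values of
# ``a`` and ``b`` are arbitrary assumptions used only for illustration.
def _example_l1_l2_to_alpha_l1_ratio(a=0.3, b=0.1):
    from sklearn.linear_model import ElasticNet

    alpha = a + b                  # overall penalty strength
    l1_ratio = a / (a + b)         # fraction of the penalty that is L1
    return ElasticNet(alpha=alpha, l1_ratio=l1_ratio)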
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
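# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The mixed norm ||W||_21 used in the objective above is described in the docstring
# as the sum of the Euclidean norm of each row.  This helper spells that formula out
# with NumPy so it can be checked on any coefficient matrix; the example matrix is
# an arbitrary assumption.
def _example_l21_norm(W=None):
    import numpy as np

    if W is None:                      # arbitrary example matrix
        W = np.array([[3.0, 4.0],      # row norm 5
                      [0.0, 0.0],      # row norm 0
                      [1.0, 0.0]])     # row norm 1
    return np.sqrt((W ** 2).sum(axis=1)).sum()   # 6.0 for the example above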
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
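# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The MultiTaskLassoCV docstring above carries no usage example, so this sketch shows
# a minimal fit on a small multi-output problem; the synthetic data and the fold
# count are arbitrary assumptions.
def _example_multitask_lassocv():
    import numpy as np
    from sklearn.linear_model import MultiTaskLassoCV

    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    W = np.zeros((8, 3))
    W[:2] = rng.randn(2, 3)            # only the first two features are relevant
    Y = X.dot(W) + 0.1 * rng.randn(60, 3)

    model = MultiTaskLassoCV(cv=3).fit(X, Y)
    return model.alpha_, model.coef_.shape   # coef_ has shape (n_tasks, n_features)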
| bsd-3-clause |
gbcolborne/TALN_2016 | exp_AD.py | 1 | 5248 | #! -*- coding:utf-8 -*-
""" Evaluate count-based distributional semantic models. """
import sys, os, codecs, argparse
from sklearn.metrics.pairwise import pairwise_distances
import Corpus, CoocTensor, eval_utils
def join_strings_and_append_to_file(strings, filename):
"""
Join strings in list using comma as delimiter, append to file at
path provided.
"""
with open(filename, 'a') as f:
f.write(','.join(strings)+'\n')
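# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The main block below ranks target words by cosine distance and scores each model
# with mean average precision (MAP) via eval_utils.compute_MAP_from_dist_mat, whose
# implementation is not shown here.  The helper below is only a rough, hypothetical
# illustration of that kind of computation (rank the neighbours of each query word,
# then average precision at the rank of every reference relatum); it is NOT the
# actual eval_utils code, and the assumed ``ref`` structure (query -> relata) is an
# assumption.
def example_mean_average_precision(dist, ref, word2id):
    import numpy as np
    ap_values = []
    for query, relata in ref.items():
        gold = set(word2id[w] for w in relata if w in word2id)
        if query not in word2id or not gold:
            continue
        qid = word2id[query]
        order = np.argsort(dist[qid])
        order = order[order != qid]          # do not rank the query itself
        hits, precisions = 0, []
        for rank, wid in enumerate(order, start=1):
            if wid in gold:
                hits += 1
                precisions.append(hits / float(rank))
        if precisions:
            ap_values.append(sum(precisions) / float(len(gold)))
    return sum(ap_values) / float(len(ap_values)) if ap_values else 0.0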
if __name__ == "__main__":
dsc = ("Évaluer différentes paramétrisations de l'analyse distributionnelle "
"au moyen des données de référence.")
parser = argparse.ArgumentParser(description=dsc)
parser.add_argument('-s', '--exclude_stop', action="store_true",
help='Exclure les mots vides des mots-cibles')
parser.add_argument('corpus', help="Chemin du corpus (fichier texte, une phrase par ligne)")
parser.add_argument('output', help="Chemin du fichier de résultats (CSV) qui sera produit")
args = parser.parse_args()
# Check args
if os.path.exists(args.output):
sys.exit('ERREUR: Le fichier {} existe déjà.'.format(args.output))
# Check that the evaluation data are where they should be
PATH_REF = 'data/ref_FR.csv'
if not os.path.isfile(PATH_REF):
e = "Erreur: le fichier {} n'existe pas.".format(PATH_REF)
sys.exit(e)
# Load stop words if necessary
stopwords = None
if args.exclude_stop:
stopwords = set()
with codecs.open('data/stop_FR.txt', 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if len(line) and line[0] != '#':
stopwords.add(line)
# Prepare corpus, compute vocab
print u'\nLecture du corpus et calcul du vocabulaire...'
corpus = Corpus.Corpus(args.corpus)
print u'Terminée. Le corpus contient {} mots.'.format(corpus.size)
# Select target words
print u'\nIdentification des mots-cibles...'
nb_targets = 10000
targets = []
for word in corpus.most_freq_words(apply_filters=True, stopwords=stopwords):
targets.append(word)
if len(targets) == nb_targets:
break
target2id = dict([(w,i) for (i,w) in enumerate(targets)])
print u'Terminée. {} mots-cibles identifiés.'.format(len(targets))
# Process evaluation data
print u'\nLecture des données de référence...'
ref = eval_utils.process_ref(PATH_REF, target_words=targets)
nb_rels = sum(len(v) for v in ref['TOUTES'].itervalues())
if nb_rels == 0:
sys.exit('ERREUR: Aucune relation extraite.')
print u'Terminée. {} relations extraites.'.format(nb_rels)
# Transform corpus into list of word IDs
print u'\nPréparation du corpus...'
corpus = corpus.list_word_IDs(target2id, OOV_ID=-1)
print u'Terminée.'
# Define parameter values to be tested
max_win_size = 10
win_sizes = range(max_win_size,0,-1)
win_types = ['G&D', 'G+D']
win_shapes = ['rect', 'tri']
weighting_schemes = [('None', 'None'), ('None', 'log'), ('MI', 'None'),
('MI2', 'None'), ('MI3', 'None'), ('local-MI', 'log'),
('z-score', 'sqrt'), ('t-score', 'sqrt'), ('simple-ll', 'log')]
model_params = []
for wt in win_types:
for ws in win_shapes:
for cs in win_sizes:
for (we,tr) in weighting_schemes:
model_params.append((wt,ws,cs,we,tr))
# Compute (word, context, position) cooccurrence frequency tensor
print u'\nCalcul du tenseur de cooccurrence...'
tensor = CoocTensor.CoocTensor(corpus, max_win_size)
print u'Terminé.'
# Write header in output
ref_rels = ['QSYN', 'ANTI', 'HYP', 'DRV', 'TOUTES']
header = ['win_type','win_shape','win_size','weighting_scheme']
header += ['MAP({})'.format(rel) for rel in ref_rels]
join_strings_and_append_to_file(header, args.output)
# Start experiment
num_models = len(model_params)
model_counter = 0
for (wt, ws, cs, we, tr) in model_params:
model_counter += 1
# Build model
msg = u'\nConstruction du modèle {} de {}...'.format(model_counter,num_models)
msg += u'\n - type de fenêtre = {}'.format(wt)
msg += u'\n - taille de fenêtre = {}'.format(cs)
msg += u'\n - forme de fenêtre = {}'.format(ws)
msg += u"\n - mesure d'association = {}".format(we)
msg += u'\n - transformation = {}'.format(tr)
print msg
model = tensor.to_matrix(cs, wt, ws, we, tr)
# Compute MAP on all data sets
print u'Évaluation du modèle...'
dist = pairwise_distances(model, metric='cosine')
MAP_strings = []
for rel in ref_rels:
MAP = eval_utils.compute_MAP_from_dist_mat(dist, ref[rel], target2id)
MAP_strings.append('{:.4f}'.format(MAP))
print u'Terminée.'
# Write results of model evaluation (MAP)
param_strings = [wt, ws, str(cs), '{}+{}'.format(we, tr)]
join_strings_and_append_to_file(param_strings + MAP_strings, args.output)
print u'\n\n\nVoir les résultats dans {}.\n'.format(args.output)
| mit |
lbeltrame/bcbio-nextgen | bcbio/heterogeneity/loh.py | 2 | 15786 | """Summarize amplification and loss of heterozygosity (LOH) from heterogeneity callers.
Provides high level summaries of calls in regions of interest.
"""
import csv
import collections
import os
import decimal
import uuid
import pandas as pd
import six
from six import StringIO
import toolz as tz
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
# Standard sets of coordinates we always include
_COORDS = {"LOH":
{"hg38": {"HLA": ("chr6", 28510120, 33480577),
"B2M": ("chr15", 44711487, 44718877)},
"hg19": {"HLA": ("chr6", 29640000, 33120000),
"B2M": ("chr15", 45003675, 45011075)},
"GRCh37": {"HLA": ("6", 29640000, 33120000),
"B2M": ("15", 45003675, 45011075)}}}
def get_coords(data):
"""Retrieve coordinates of genes of interest for prioritization.
Can read from CIViC input data or a supplied BED file of chrom, start, end
and gene information.
"""
for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}),
("amplification", {"AMPLIFICATION"})]:
out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {})
priority_file = dd.get_svprioritize(data)
if priority_file:
if os.path.basename(priority_file).find("civic") >= 0:
for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)):
out[gene] = (chrom, start, end)
elif os.path.basename(priority_file).find(".bed") >= 0:
for line in utils.open_gzipsafe(priority_file):
parts = line.strip().split("\t")
if len(parts) >= 4:
chrom, start, end, gene = parts[:4]
out[gene] = (chrom, int(start), int(end))
yield category, out
def _matches(tocheck, target):
for t in target:
t = t.lower()
for c in tocheck:
if c.lower().find(t) >= 0:
return True
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None):
"""Retrieve gene regions and names filtered by variant_types and diseases.
"""
if isinstance(diseases, six.string_types):
diseases = [diseases]
with utils.open_gzipsafe(civic_file) as in_handle:
reader = csv.reader(in_handle, delimiter="\t")
for chrom, start, end, info_str in reader:
info = edn_loads(info_str)
if not variant_types or _matches(info["support"]["variants"], variant_types):
if not diseases or _matches(info["support"]["diseases"], diseases):
if not drugs or _matches(info["support"]["drugs"], drugs):
yield (chrom, int(start), int(end), list(info["name"])[0])
def summary_status(call, data):
"""Retrieve status in regions of interest, along with heterogeneity metrics.
Provides output with overall purity and ploidy, along with region
specific calls.
"""
out_file = None
if call.get("vrn_file") and os.path.exists(call.get("vrn_file")):
out_file = os.path.join(os.path.dirname(call["vrn_file"]),
"%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"]))
if not utils.file_uptodate(out_file, call["vrn_file"]):
out = {}
if call["variantcaller"] == "titancna":
out.update(_titancna_summary(call, data))
elif call["variantcaller"] == "purecn":
out.update(_purecn_summary(call, data))
if out:
out["description"] = dd.get_sample_name(data)
out["variantcaller"] = call["variantcaller"]
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file if out_file and os.path.exists(out_file) else None
def _check_copy_number_changes(svtype, cn, minor_cn, data):
"""Check if copy number changes match the expected svtype.
"""
if svtype == "LOH" and minor_cn == 0:
return svtype
elif svtype == "amplification" and cn > dd.get_ploidy(data):
return svtype
else:
return "std"
def _to_cn(v):
return int(round(float(v)))
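# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Standalone illustration of the classification rule implemented by
# _check_copy_number_changes above, with the sample ploidy passed explicitly instead
# of being read from the bcbio ``data`` dictionary.  The copy-number values in the
# docstring are made up purely for illustration.
def _example_classify_cnv(svtype, cn, minor_cn, ploidy=2):
    """e.g. ("LOH", 2, 0) -> "LOH"; ("amplification", 5, 2) -> "amplification";
    ("LOH", 2, 1) -> "std" (no qualifying change)."""
    if svtype == "LOH" and minor_cn == 0:
        return svtype
    elif svtype == "amplification" and cn > ploidy:
        return svtype
    else:
        return "std"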
def _titancna_summary(call, data):
"""Summarize purity, ploidy and LOH for TitanCNA.
"""
out = {}
for svtype, coords in get_coords(data):
cur_calls = {k: collections.defaultdict(int) for k in coords.keys()}
with open(call["subclones"]) as in_handle:
header = in_handle.readline().strip().split()
for line in in_handle:
val = dict(zip(header, line.strip().split()))
start = int(val["Start_Position.bp."])
end = int(val["End_Position.bp."])
for region, cur_coords in coords.items():
if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]):
cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]),
_to_cn(val["MinorCN"]), data)] += 1
out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()}
with open(call["hetsummary"]) as in_handle:
vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t")))
out["purity"] = vals["purity"]
out["ploidy"] = vals["ploidy"]
return out
def _purecn_summary(call, data):
"""Summarize purity, ploidy and LOH for PureCN.
"""
out = {}
loh_calls = pd.read_csv(call["loh"])
for svtype, coords in get_coords(data):
cur_calls = {k: collections.defaultdict(int) for k in coords.keys()}
for rowid, row in loh_calls.iterrows():
_, chrom, start, end, _, cn, minor_cn = row.iloc[0:7]
if pd.isnull(cn) or pd.isnull(minor_cn):
# NA copy number calls - skip
continue
start = int(start)
end = int(end)
for region, cur_coords in coords.items():
if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]):
cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1
out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()}
with open(call["hetsummary"]) as in_handle:
vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","),
in_handle.readline().strip().split(",")))
out["purity"] = vals["Purity"]
out["ploidy"] = vals["Ploidy"]
return out
def _merge_cn_calls(calls, svtype):
if calls[svtype]:
return "mixed" if calls["std"] else svtype
else:
return "no"
def are_overlapping(r, s):
"""Test if two coordinates overlap.
https://stackoverflow.com/a/27182551
"""
return r[1] >= s[0] and s[1] >= r[0]
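# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Two quick checks of the closed-interval overlap test defined above; the
# coordinates are arbitrary.
def _example_are_overlapping():
    assert are_overlapping((10, 20), (15, 30))      # partial overlap
    assert not are_overlapping((10, 20), (21, 30))  # disjoint intervals
    return True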
# ## EDN parser
# Thanks to https://github.com/sunng87/pyclj
# Slightly adapted to avoid external dependencies
def edn_load(fp):
decoder = CljDecoder(fp)
return decoder.decode()
def edn_loads(s):
buf = StringIO(s)
result = edn_load(buf)
buf.close()
return result
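# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Small example of the EDN subset handled by the decoder below: keywords become
# plain strings, ``#{...}`` becomes a set and ``{...}`` a dict, which is how the
# CIViC ``info_str`` column is consumed in _civic_regions above.  The literal used
# here is a made-up value, not real CIViC data.
def _example_edn_loads():
    parsed = edn_loads('{:variants #{:amplification :loss} :count 2}')
    # expected to evaluate to {'variants': {'amplification', 'loss'}, 'count': 2}
    return parsed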
def _number(v):
if v.endswith('M'):
out = decimal.Decimal(v[:-1])
else:
try:
out = int(v)
except ValueError as e:
out = float(v)
return out
_STOP_CHARS = [" ", ",", "\n", "\r", "\t"]
_COLL_OPEN_CHARS = ["#", "[", "{", "("]
_COLL_CLOSE_CHARS = ["]", "}", ")"]
_EXTRA_NUM_CHARS = ["-", "+", ".", "e", "E", "M"]
class CljDecoder(object):
def __init__(self, fd):
self.fd = fd
self.cur_line = 1
self.cur_pos = 1
self.value_stack = []
self.terminator = None ## for collection type
def decode(self):
while True:
v = self.__read_token()
if len(self.value_stack) == 0:
return v
def __seek_back(self, size):
self.fd.seek(self.fd.tell()-size, 0)
def __read_and_back(self, size):
s = self.fd.read(size)
self.__seek_back(size)
return s
def __get_type_from_char(self, c):
"""return a tuple of type information
* type name
* a flag to indicate if it's a collection
"""
if c.isdigit() or c =='-':
return ("number", False, None)
elif c == 't' or c == 'f': ## true/false
return ("boolean", False, None)
elif c == 'n': ## nil
return ("nil", False, None)
elif c == '\\' :
return ("char", False, None)
elif c == ':':
return ("keyword", False, None)
elif c == '"':
return ("string", False, None)
elif c == '#':
if self.__read_and_back(1) == '{':
return ("set", True, "}")
if self.__read_and_back(1) == ':':
return ("namespaced_dict", True, "}")
if self.__read_and_back(4) == 'inst':
return ("datetime", False, None)
if self.__read_and_back(4) == 'uuid':
return ("uuid", False, None)
elif c == '{':
return ("dict", True, "}")
elif c == '(':
return ("list", True, ")")
elif c == '[':
return ('list', True, "]")
return (None, False, None)
def __read_fd(self, size):
if size == 1:
c = self.fd.read(size)
if c == '\n':
self.cur_pos = 0
self.cur_line = self.cur_line + 1
return c
else:
self.cur_pos = self.cur_pos + size
cs = self.fd.read(size)
return cs
def __read_token(self):
c = self.__read_fd(1)
## skip all stop chars if necessary
while c in _STOP_CHARS:
c = self.__read_fd(1)
## raise exception when unexpected EOF found
if c == '':
raise ValueError("Unexpected EOF")
t, coll, term = self.__get_type_from_char(c)
if coll:
## move cursor
if t == "set":
## skip {
self.__read_fd(1)
namespace = None
if t == "namespaced_dict":
## skip :
self.__read_fd(1)
## get namespace
buf = []
while c != '{':
c = self.__read_fd(1)
buf.append(c)
namespace = ''.join(buf[:-1])
self.terminator = term
self.value_stack.append(([], self.terminator, t, namespace))
return None
else:
v = None ## token value
e = None ## end char
r = True ## the token contains data or not
if t == "boolean":
if c == 't':
chars = self.__read_fd(4)
if chars[:3] != 'rue':
raise ValueError('Expect true, got t%s at line %d, col %d' % (chars[:3], self.cur_line, self.cur_pos))
e = chars[-1]
v = True
else:
chars = self.__read_fd(5)
if chars[:4] != 'alse':
raise ValueError('Expect false, got f%s at line %d, col %d' % (chars[:4], self.cur_line, self.cur_pos))
e = chars[-1]
v = False
elif t == "char":
buf = []
while c is not self.terminator and c != "" and c not in _STOP_CHARS:
c = self.__read_fd(1)
buf.append(c)
e = c
v = ''.join(buf[:-1])
elif t == "nil":
chars = self.__read_fd(3)
if chars[:2] != 'il':
raise ValueError('Expect nil, got n%s at line %d, col %d' % (chars[:2], self.cur_line, self.cur_pos))
e = chars[-1]
v = None
elif t == "number":
buf = []
while c.isdigit() or (c in _EXTRA_NUM_CHARS):
buf.append(c)
c = self.__read_fd(1)
e = c
numstr = ''.join(buf)
v = _number(numstr)
## special case for
## [23[12]]
## this is a valid clojure form
if e in _COLL_OPEN_CHARS:
self.__seek_back(1)
elif t == "keyword":
buf = [] ##skip the leading ":"
while c is not self.terminator and c != "" and c not in _STOP_CHARS:
c = self.__read_fd(1)
buf.append(c)
e = c
v = ''.join(buf[:-1])
elif t == "string":
buf = []
cp = c = self.__read_fd(1) ## to check escaping character \
while not(c == '"' and cp != '\\'):
buf.append(c)
cp = c
c = self.__read_fd(1)
e = c
v = unicode(''.join(buf).decode('unicode-escape'))
elif t == "datetime":
## skip "inst"
self.__read_fd(4)
## read next value as string
s = self.__read_token()
if not isinstance(s, six.string_types):
raise ValueError('Str expected, but got %s' % str(s))
## remove read string from the value_stack
if len(self.value_stack) > 0:
self.value_stack[-1][0].pop()
e = '"'
v = pyrfc3339.parse(s)
elif t == "uuid":
## skip "uuid"
self.__read_fd(4)
## read next value as string
s = self.__read_token()
if not isinstance(s, six.string_types):
raise ValueError('Str expected, but got %s' % str(s))
## remove read string from the value_stack
if len(self.value_stack) > 0:
self.value_stack[-1][0].pop()
e = '"'
v = uuid.UUID(s)
else:
if c not in _COLL_CLOSE_CHARS:
raise ValueError('Unexpected char: "%s" at line %d, col %d' % (c, self.cur_line, self.cur_pos))
r = False
e = c
if e == self.terminator:
current_scope, _, container, namespace = self.value_stack.pop()
if r:
current_scope.append(v)
if container == "set":
try:
v = set(current_scope)
except TypeError:
v = tuple(current_scope)
elif container == "list":
v = current_scope
elif container in ["dict", "namespaced_dict"]:
v = {}
for i in range(0, len(current_scope), 2):
key = '%s/%s' % (namespace, current_scope[i]) if namespace else current_scope[i]
v[key] = current_scope[i+1]
r = True
if r and len(self.value_stack) > 0:
self.value_stack[-1][0].append(v)
self.terminator = self.value_stack[-1][1]
return v
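    # Note (added): __read_token acts as tokenizer and reducer at once.  Scalar
    # tokens are appended to the innermost open collection on value_stack, and
    # once a scalar's end character equals the current terminator the collected
    # items are folded into a set/list/dict and handed back to the enclosing
    # scope.  Rough illustration (behaviour inferred from the code above):
    #   "#{1 2 3}"  -> type "set",  items [1, 2, 3], folded to {1, 2, 3}
    #   "(:a 1)"    -> type "list", items ['a', 1]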
| mit |
vivekmishra1991/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
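# The preference controls how strongly each point wants to be an exemplar:
# more negative values yield fewer clusters.  preference=-50 typically
# recovers the three blobs generated above as three clusters.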
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
ProkopHapala/SimpleSimulationEngine | cpp/sketches_SDL/Molecular/python/eFF_terms.py | 1 | 12181 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spc
'''
Note: It seems that the H2 molecule cannot be stable without varying the Kinetic Energy
see:
[1] https://link.aps.org/doi/10.1103/PhysRevLett.99.185003
Excited Electron Dynamics Modeling of Warm Dense Matter
Julius T. Su, William A. Goddard,
[2] http://aip.scitation.org/doi/10.1063/1.3272671
The dynamics of highly excited electronic systems: Applications of the electron force field
Julius T. Su, William A. Goddard
[3] http://dx.doi.org/10.1016/j.mechmat.2015.02.008
Non-adiabatic dynamics modeling framework for materials in extreme conditions
Hai Xiao, Andres Jaramillo-Botero, Patrick L. Theofanis, William A. Goddard
To check and obtain constants:
https://en.wikipedia.org/wiki/Hydrogen_atom#Bohr%E2%80%93Sommerfeld_Model
https://en.wikipedia.org/wiki/Fine-structure_constant
'''
# ==== constants in SI Units
# see https://en.wikipedia.org/wiki/Fine-structure_constant
const_hbar = 1.054571817e-34 # [J.s] #6.582119569e-16 # [eV/s]
const_Me = 9.10938356e-31 # [kg]
const_e = 1.602176620898e-19 # [Coulomb]
const_eps0 = 8.854187812813e-12 # [F.m = Coulomb/(Volt*m)]
const_eV = 1.602176620898e-19 # [J]
const_Angstroem = 1.0e-10
const_K = const_hbar**2/const_Me
const_El = const_e**2/(4.*np.pi*const_eps0)
const_Ry = 0.5 * const_El**2/const_K
const_Ry_eV = 13.6056925944
const_El_eVA = const_El/( const_e*const_Angstroem )
const_K_eVA = (const_El_eVA**2)/(2*const_Ry_eV)
print "const_El, const_El_eVA ", const_El, const_El_eVA
print "const_Ry const_Ry_eV ", const_Ry, const_Ry/const_eV
print "const_K, const_K_eVA ", const_K, const_K_eVA
#exit()
#const_K = const_hbar**2/const_Me # [ eV * A^2 ]
#const_K = 0.1* 30.0824137226 # [eV*A^2] hbar[J.s]^2/(Me [kg]) / ( eV[J]*A^2[m]) # (6.62607015e-34^2/9.10938356e-31)/1.602176620898e-19/10e-20
#const_Ke = 1.5*const_K
const_Ke_eVA = const_K_eVA*1.5
print "const_Ke_eVA ", const_Ke_eVA
#const_El = 14. # 14 (1./((4*np.pi*const_eps0))
sqrt2 = np.sqrt(2.)
def Kinetic( s ):
'''
Ek = (hbar^2/me) (3./2.) 1/s^2
'''
return const_Ke_eVA/(s**2)
def El( r, qq, si=0, sj=0 ):
if si>0:
if sj>0:
si = np.sqrt( si**2 + sj**2 )
        return const_El_eVA * (qq/r) * spc.erf( sqrt2 * r/si )
else:
return const_El_eVA * (qq/r)
def El_aa( r, qq ):
return const_El_eVA * (qq/r)
def El_ae( r, qq, s ):
return const_El_eVA * (qq/r) * spc.erf( sqrt2 * r/s )
def El_ee( r, qq, si, sj ):
s = np.sqrt( si**2 + sj**2 )
return const_El_eVA * (qq/r) * spc.erf( sqrt2 * r/s )
def getT( r, si, sj ):
#print "getT si, sj ", si, sj
# r = r * 1.125
# s = s*0.9
si2 = si**2
sj2 = sj**2
r2 = r**2
#return const_K * ( 1.5*( (si2+sj2)/(si2*sj2) ) - 2.*( 3.*(si2+sj2) - 2.*r2 )/( si2 + sj2 )**2 )
return const_K_eVA * ( 1.5*( 1./si2 + 1./sj2 ) - 2.*( 3.*(si2+sj2) - 2.*r2 )/( si2 + sj2 )**2 )
def getAmp( si, sj ):
si2 = si**2
sj2 = sj**2
#return const_K_eVA * ( 1.5*( 1./si2 + 1./sj2 ) - 2.*( 3.*(si2+sj2) - 2.*0 )/( si2 + sj2 )**2 )
#return const_K_eVA * ( 1.5*( 1./si2 + 1./sj2 ) - 2.*( 1.*(si2+sj2) )/( si2 + sj2 )**2 )
#return const_K_eVA * 2.2*( 1.5*( 1/si2 + 1/sj2 ) - 4.9/( si2 + sj2 ) )
#return const_K_eVA * 2.2*( 1.5*( (si2 + sj2)/(si2*sj2) ) - 4.9/( si2 + sj2 ) )
#return const_K_eVA * 2.2*( 1.5*(si2*si2 + sj2*sj2) - 1.9*(si2*sj2) )/((si2*sj2)*(si2+sj2))
#return const_K_eVA * 2.2*( 1.5*(si2*si2 + sj2*sj2) - 1.9*(si2*sj2) )/((si2*sj2)*(si2+sj2))
return const_K_eVA * 3.3*( si2*si2 + sj2*sj2 - 1.25*(si2*sj2) )/((si2*sj2)*(si2+sj2))
#return const_K_eVA * 3.14*( si2*si2 + sj2*sj2 - 1.25*(si2*sj2) )/((si2*sj2)*(si2+sj2))
#return const_K_eVA * ( 1.5*( 1./si2 + 1./sj2 ) )
#return const_K_eVA * ( 1.5*( 1./si2 + 1./sj2 ) - 2.*3./( si2 + sj2 ) )
def getS( r, si, sj ):
#print "getS si, sj ", si, sj
# r = r * 1.125
# s = s*0.9
si2 = si**2
sj2 = sj**2
r2 = r**2
return ( 2.*(si*sj)/(si2+sj2) )**1.5 * np.exp( -r2/( si2 + sj2 ) )
'''
def EPauli( r, si, sj, rho=0.2 ):
T = getT( r, si, sj )
S = getS( r, si, sj )
S2 = S**2
# ( S2*(1+S2) + (1-rho)* S2*(1-S2) ) / (1-S2*S2 )
# ( S2+S2*S2 + (1-rho)*(S2-S2*S2) ) / (1-S2*S2 )
# ( ( (2-rho)*S2 +rho*S2*S2 ) / (1-S2*S2 )
return T * ( (S2/(1.-S2)) + ( 1.-rho )*(S2/(1.+S2)) )
def EPauli_pair( r, si, sj, rho=0.2 ):
T = getT( r, si, sj )
S = getS( r, si, sj )
S2 = S**2
return T * ( rho*S2/(1.+S2) )
'''
def EPauli( r, si, sj, anti=False, rho=0.2, kr=1.125, ks=0.9 ):
r = r*kr
si = si*ks
sj = sj*ks
T = getT( r, si, sj )
S = getS( r, si, sj )
S2 = S**2
if anti:
return T * ( rho*S2/(1.+S2) )
else:
return T * ( (S2/(1.-S2)) + ( 1.-rho )*(S2/(1.+S2)) )
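# Note (added): EPauli builds the eFF-style Pauli term from the kinetic (T)
# and overlap (S) expressions getT/getS above, with r and s rescaled by the
# empirical factors kr, ks:
#   same spin     : E = T * ( S^2/(1-S^2) + (1-rho)*S^2/(1+S^2) )
#   opposite spin : E = T * rho*S^2/(1+S^2)
# (the anti=True branch corresponds to the opposite-spin pair).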
def DensOverlap( r, si, sj, amp=10 ):
s2 = si**2+sj**2
#amp *= 1.4/s2
#amp *= 0.7/(si*sj)
#amp *= (1/si**2 + 1/sj**2)
#amp *= (si**2+sj**2)/(si*sj)**2
#amp *= (si+sj)**2/(si*sj)**2
#amp *= (1+(si-sj)**2)/min(si,sj)**2
#amp *= 0.5*(1+4*(si-sj)**2) *( 1/si**2 + 1/sj**2 )
a = 2*si*sj/s2
e1 = amp*a**3
e2 = np.exp( -2*(r**2/s2) )
return e1*e2
def Hatom( s ):
Ek = Kinetic( s )
Eae = El_ae( 0.01, -1., s )
#Etot = Ek+Eae
return Ek,Eae
def H2cation( rHH, s, cr=0.5 ):
Ek = Kinetic( s ) # kinetic energy of electron
Eaa = El_aa( rHH, 1. ) # Coulomb repulsion nuclei_1 + nuclei_2
Eae = El_ae( rHH*(cr ), -1., s ) # Coulomb attraction electron + nuclei_1
Eae += El_ae( rHH*(1.-cr), -1., s ) # Coulomb attraction electron + nuclei_2
return Ek, Eae, Eaa
def H2molecule( r, s, cr=0.5 ):
Ek = 2*Kinetic( s ) # kinetic energy of electron_1 and electron_2
Eaa = El_aa( r, +1. ) # Coulomb repulsion nuclei_1 * nuclei_2
Eae = 2*El_ae( r*(cr ), -1., s ) # Coulomb attraction (electron_1 * nuclei_1) + (electron_2 * nuclei_2)
Eae += 2*El_ae( r*(1.-cr), -1., s ) # Coulomb attraction (electron_1 * nuclei_2) + (electron_2 * nuclei_1)
Eee = El_ee( r*(1.-2.*cr), +1., s, s ) # Coulomb repulsion electron_1 * electron_2
EPaul = EPauli( r*(1.-2.*cr), s, s, anti=True ) # Pauli repulsion electron_1 * electron_2
return Ek, Eae, Eaa, Eee, EPaul
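# Example (sketch, values illustrative only): evaluate the H2 model at one
# geometry.  cr=0.5 would place the two electrons on top of each other and
# make El_ee diverge, hence the slightly asymmetric split as in __main__ below.
#   Ek, Eae, Eaa, Eee, EPaul = H2molecule( 1.4, 1.0, cr=0.49 )
#   Etot = Ek + Eae + Eaa + Eee + EPaul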
if __name__ == "__main__":
extent=( 0.5,8.0, 0.5,4.5 )
xs = np.arange( extent[0], extent[1], 0.05 )
ys = np.arange( extent[2], extent[3], 0.1 )
# ============= e-e onsite
r0 = 0.01
ss = np.arange( 0.25, 5.0, 0.1 )
rho=0.2; kr=1.125; ks=0.9
r_ = r0*kr
s_ = ss*ks
T = getT( r_, s_, s_ )
S = getS( r_, s_, s_ )
S2 = S**2
EPminus = T * ( rho*S2/(1.+S2) )
EPplus = T * ( (S2/(1.-S2)) + ( 1.-rho )*(S2/(1.+S2)) )
plt.figure()
plt.title( 'Onsite (R= %g [A])' %r0 )
plt.xlabel('sigma[A]')
plt.plot( ss, S, ':', label="S" )
plt.plot( ss, T, ':', label="T" )
plt.plot( ss, EPplus, 'b', label="EP+" )
plt.plot( ss, EPminus,'c', label="EP-" )
plt.legend()
plt.grid()
#plt.show(); exit()
# ============= e-e
rs = np.arange( 0.1, 6.0, 0.05 )
#ss = [0.5, 1.0, 1.5 ]
ss = [0.25, 1.0, 2.5 ]
rho=0.2; kr=1.125; ks=0.9
plt.figure(figsize=(13,10))
for i,si in enumerate(ss):
for j,sj in enumerate(ss):
Eee = El_ee( rs, +1., si, sj )
r_ = rs*kr
#s_ = s*ks
T = getT( r_, si*ks, sj*ks )
S = getS( r_, si*ks, sj*ks )
S2 = S**2
EPminus = T * ( rho*S2/(1.+S2) )
EPplus = T * ( (S2/(1.-S2)) + ( 1.-rho )*(S2/(1.+S2)) )
#amp = 10*(1+(si-sj)**2)/min(si,sj)**2
#amp = 10/min(si,sj)**2
#amp = 10*(1+0.6*abs(si-sj))/min(si,sj)**2
#amp = 10*(si/sj+sj/si)
#amp = 10
#amp = T*1.8
amp = getAmp( si, sj )
EPdens = DensOverlap( rs, si, sj, amp=amp )
plt.subplot(3,3,3*j+i+1)
#plt.plot( rs, S, ':', label="S" )
#plt.plot( xs, T, ':', label="T" )
#plt.plot( rs, Eee , 'r', label="Eee" )
plt.plot( rs, EPplus, 'b', label="EP+" )
#plt.plot( rs, EPminus,'c', label="EP-" )
plt.plot( rs, EPdens, 'm', label="EPdens" )
plt.title( 'sigma (%g,%g)' %(si,sj) )
plt.legend()
plt.grid()
#plt.plot( ys, Etot, 'k', label="Etot" )
plt.show(); exit()
# ============= H-atom
#Ek = Kinetic( ys )
#Eae = El_ae( 0.01, -1., ys )
Ek,Eae = Hatom( ys )
Etot = Ek+Eae
plt.figure()
plt.plot( ys, Ek , 'r', label="Ek" )
plt.plot( ys, Eae, 'b', label="Eae" )
plt.plot( ys, Etot, 'k', label="Etot" )
imin = np.argmin( Etot )
print "H-atom Rmin Emin(Ek,Eel) ", ys[imin], Etot[imin], Ek[imin], Eae[imin]
EHatom = Etot[imin]
plt.legend()
plt.grid()
#plt.show(); exit()
# ============= H2-cation
Xs,Ys = np.meshgrid( xs,ys )
Ek, Eae, Eaa = H2cation( Xs, Ys, cr=0.5 )
Etot = Ek + Eaa + Eae
#Emin = Etot.min()
imin = np.unravel_index( np.argmin(Etot), Etot.shape )
Emin = Etot[imin]
Rmin = xs[imin[0]]
Smin = ys[imin[1]]
print "H2cation Rmin, Smin Emin Ebond ", Rmin, Smin, Emin, Emin-EHatom
vmin=-20.0 # [eV]
vmax=-vmin
plt.figure(figsize=(20,5))
plt.subplot(1,4,1); plt.imshow( Etot, origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.title('Etot')
plt.subplot(1,4,2); plt.imshow( Ek , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.title('Ek' )
plt.subplot(1,4,3); plt.imshow( Eaa , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.title('Eaa' )
plt.subplot(1,4,4); plt.imshow( Eae , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.title('Eel' )
#plt.subplot(1,4,2); plt.imshow( Ek , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Ek' )
#plt.subplot(1,4,3); plt.imshow( Eaa , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Eaa' )
#plt.subplot(1,4,4); plt.imshow( Eae , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Eel' )
# ============= H2-molecule
Ek, Eae, Eaa, Eee, EPaul = H2molecule( Xs, Ys, cr=0.49 )
Etot = Ek + Eae + Eaa + Eee + EPaul
#Emin = Etot.min()
imin = np.unravel_index( np.argmin(Etot), Etot.shape )
Emin = Etot[imin]
Rmin = xs[imin[0]]
Smin = ys[imin[1]]
print "H2molecule Rmin, Smin Emin Ebond ", Rmin, Smin, Emin, Emin - 2*EHatom
vmin=-50.0 # [eV]
vmax= 0.0 # [eV]
plt.figure( figsize=(18,3) )
plt.subplot(1,6,1); plt.imshow( Etot, origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('Etot')
#plt.subplot(1,6,2); plt.imshow( Ek , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('Ek' )
#plt.subplot(1,6,3); plt.imshow( Eaa , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('Eaa' )
#plt.subplot(1,6,4); plt.imshow( Eae , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('Eea' )
#plt.subplot(1,6,5); plt.imshow( Eee , origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('Eee' )
#plt.subplot(1,6,6); plt.imshow( EPaul, origin='image', extent=extent, vmin=vmin,vmax=vmax ) ;plt.colorbar() ;plt.title('EPaul')
plt.subplot(1,6,2); plt.imshow( Ek , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Ek' )
plt.subplot(1,6,3); plt.imshow( Eaa , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Eaa' )
plt.subplot(1,6,4); plt.imshow( Eae , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Eea' )
plt.subplot(1,6,5); plt.imshow( Eee , origin='image', extent=extent ) ;plt.colorbar() ;plt.title('Eee' )
plt.subplot(1,6,6); plt.imshow( EPaul, origin='image', extent=extent ) ;plt.colorbar() ;plt.title('EPaul')
plt.show()
| mit |
imperial-genomics-facility/data-management-python | igf_data/process/metadata_reformat/reformat_samplesheet_file.py | 1 | 8902 | import re
import pandas as pd
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.utils.sequtils import rev_comp
from igf_data.process.metadata_reformat.reformat_metadata_file import Reformat_metadata_file
SAMPLESHEET_COLUMNS = [
'Lane',
'Sample_ID',
'Sample_Name',
'Sample_Plate',
'Sample_Well',
'I7_Index_ID',
'index',
'I5_Index_ID',
'index2',
'Sample_Project',
'Description',
'Pool_Number'
]
class Reformat_samplesheet_file:
'''
A class for reformatting samplesheet file
:param infile: Input samplesheet file
:param file_format: Input file format, default samplesheet
List of allowed formats
* samplesheet
* csv
:param samplesheet_columns: A list of expected columns in the samplesheet file
A list of default columns
* Lane
* Sample_ID
* Sample_Name
* Sample_Plate
* Sample_Well
* I7_Index_ID
                                * index
                                * I5_Index_ID
                                * index2
                                * Sample_Project
                                * Description
                                * Pool_Number
:param remove_adapters: A toggle for removing adapters from header section ,default False
:param revcomp_index1: A toggle for reverse complementing index1 column, default False
:param revcomp_index2: A toggle for reverse complementing index2 column, default False
:param tenx_label: Description label for 10x experiments, default '10X'
:param sample_id: Sample id column name, default 'Sample_ID'
:param sample_name: Sample name column name, default 'Sample_Name'
:param index: I7 index column name, default 'index'
:param index2: I5 index column name, default 'index2'
:param sample_project: Project name column name, default 'Sample_Project'
:param description: Description column name, default 'Description'
:param adapter_section: Adapter section name in header, default 'Settings'
:param adapter_keys: A list of adapter keys to be removed from samplesheet header, default ('Adapter','AdapterRead2')
'''
def __init__(self,infile,
file_format='samplesheet',
samplesheet_columns=SAMPLESHEET_COLUMNS,
remove_adapters=False,
revcomp_index1=False,
revcomp_index2=False,
tenx_label='10X',
sample_id='Sample_ID',
sample_name='Sample_Name',
index='index',
index2='index2',
sample_project='Sample_Project',
description='Description',
adapter_section='Settings',
adapter_keys=('Adapter','AdapterRead2')):
try:
self.infile = infile
if file_format not in ['samplesheet','csv']:
raise ValueError('File format {0} not supported'.format(file_format))
self.file_format = file_format
self.samplesheet_columns = samplesheet_columns
self.tenx_label = tenx_label
self.remove_adapters = remove_adapters
self.revcomp_index1 = revcomp_index1
self.revcomp_index2 = revcomp_index2
self.sample_id = sample_id
self.sample_name = sample_name
self.index = index
self.index2 = index2
self.sample_project = sample_project
self.description = description
self.adapter_section = adapter_section
self.adapter_keys = adapter_keys
except Exception as e:
raise ValueError('Error in initializing samplesheet reformatting, error: {0}'.\
format(e))
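  # Typical use of this class (sketch; file paths are made up):
  #   rs = Reformat_samplesheet_file(infile='/path/to/SampleSheet.csv')
  #   rs.reformat_raw_samplesheet_file(output_file='/path/to/SampleSheet.reformatted.csv')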
@staticmethod
def detect_tenx_barcodes(index,tenx_label='10X'):
'''
A static method for checking 10X I7 index barcodes
:param index: I7 index string
:param tenx_label: A string description for 10X samples, default, '10X'
:returns: A string
'''
try:
description = ''
pattern = re.compile(r'SI-[GN]A-[A-H]\d+',re.IGNORECASE)
if re.match(pattern,index):
description = tenx_label
return description
except Exception as e:
raise ValueError('Failed to detect Tenx single cell barcode for index {0}, error: {1}'.\
format(index,e))
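  # For example (illustrative): detect_tenx_barcodes('SI-GA-A1') matches the
  # 10X pattern above and returns the tenx_label ('10X'), while a plain base
  # index such as 'ATCACGTT' returns an empty description.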
def correct_samplesheet_data_row(self,row):
'''
A method for correcting samplesheet data row
:param row: A Pandas Series
:returns: A Pandas Series
'''
try:
if not isinstance(row,pd.Series):
raise TypeError('Expecting A pandas series and got {0}'.\
format(type(row)))
if self.sample_id in row.keys():
row[self.sample_id] = \
Reformat_metadata_file.\
            sample_and_project_reformat(row[self.sample_id]) # reformat sample id
if self.sample_project in row.keys():
row[self.sample_project] = \
Reformat_metadata_file.\
            sample_and_project_reformat(row[self.sample_project]) # reformat project name
if self.sample_name in row.keys():
row[self.sample_name] = \
Reformat_metadata_file.\
            sample_name_reformat(row[self.sample_name]) # reformat sample name
if self.index in row.keys() and \
self.description in row.keys():
row[self.description] = \
self.detect_tenx_barcodes(\
index=row[self.index],
tenx_label=self.tenx_label) # add description label for 10x samples
if self.index in row.keys() and \
self.description in row.keys() and \
(row[self.index]!='' or row[self.index] is not None ) and \
row[self.description] != self.tenx_label:
row[self.index] = row[self.index].upper()
if self.revcomp_index1:
row[self.index] = rev_comp(row[self.index]) # revcomp index 1
if self.index2 in row.keys() and \
(row[self.index2]!='' or row[self.index2] is not None ):
row[self.index2] = row[self.index2].upper()
if self.revcomp_index2:
row[self.index2] = rev_comp(row[self.index2]) # revcomp index 2
if self.description in row.keys() and \
(row[self.description] !='' or \
row[self.description] is not None):
row[self.description] = row[self.description].upper() # change description to upper case letters
return row
except Exception as e:
raise ValueError('Failed to correct samplesheet data row {0},error {1}'.\
format(row,e))
def reformat_raw_samplesheet_file(self,output_file):
'''
    A method for reformatting the raw samplesheet file
:param output_file: An output file path
:returns: None
'''
try:
samplesheet_data = list()
if self.file_format == 'samplesheet':
samplesheet = SampleSheet(infile=self.infile)
samplesheet_data = pd.DataFrame(samplesheet._data)
elif self.file_format == 'csv':
samplesheet_data = pd.read_csv(self.infile,header=0,dtype=object)
samplesheet_data.fillna('',inplace=True)
samplesheet_data = \
samplesheet_data.\
apply(\
lambda row: self.correct_samplesheet_data_row(row=row),
axis=1,
          result_type='reduce') # reformat samplesheet data
column_names = \
[column_name \
for column_name in samplesheet_data.columns \
if column_name in self.samplesheet_columns ] # filter expected column names
if len(column_names) == 0:
raise ValueError('No expected columns found on the samplesheet data')
samplesheet_data = samplesheet_data[column_names] # filter samplesheet data
if self.file_format == 'samplesheet':
samplesheet._data = \
samplesheet_data.\
to_dict(orient='records') # update samplesheet object with new data
if self.remove_adapters:
for adapter_key in self.adapter_keys:
samplesheet.\
modify_sample_header(\
section=self.adapter_section,
type='remove',
condition_key=adapter_key) # remove adapters from samplesheet
samplesheet.print_sampleSheet(outfile=output_file) # print corrected samplesheet
elif self.file_format == 'csv':
        samplesheet_data.to_csv(output_file,index=False) # dump samplesheet data as a csv file
except Exception as e:
raise ValueError('Failed to reformat samplesheet file {0}, error {1}'.\
format(self.infile,e)) | apache-2.0 |
stwunsch/gnuradio | gr-filter/examples/resampler.py | 58 | 4454 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
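# Note (added): the flowgraph above feeds the same N samples through two
# pfb.arb_resampler_ccf paths -- one with hand-made taps, one with the filter
# the block designs automatically -- so main() can compare them.  With
# fs_in=8 kHz and fs_out=20 kHz the resampling ratio is 20000/8000 = 2.5.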
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
liberatorqjw/scikit-learn | examples/svm/plot_svm_nonlinear.py | 61 | 1089 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learn by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/compat/pickle_compat.py | 1 | 7903 | """
Support pre-0.12 series pickle compatibility.
"""
import contextlib
import copy
import io
import pickle as pkl
from typing import TYPE_CHECKING, Optional
import warnings
from pandas._libs.tslibs import BaseOffset
from pandas import Index
if TYPE_CHECKING:
from pandas import DataFrame, Series
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except TypeError as err:
# If we have a deprecated function,
# try to replace and try again.
msg = "_reconstruct: First argument must be a sub-type of ndarray"
if msg in str(err):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except TypeError:
pass
elif args and issubclass(args[0], BaseOffset):
# TypeError: object.__new__(Day) is not safe, use Day.__new__()
cls = args[0]
stack[-1] = cls.__new__(*args)
return
raise
_sparse_msg = """\
Loading a saved '{cls}' as a {new} with sparse values.
'{cls}' is now removed. You should re-save this dataset in its new format.
"""
class _LoadSparseSeries:
# To load a SparseSeries as a Series[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "Series", but must return
# a subtype of "_LoadSparseSeries")
def __new__(cls) -> "Series": # type: ignore[misc]
from pandas import Series
warnings.warn(
_sparse_msg.format(cls="SparseSeries", new="Series"),
FutureWarning,
stacklevel=6,
)
return Series(dtype=object)
class _LoadSparseFrame:
# To load a SparseDataFrame as a DataFrame[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "DataFrame", but must
# return a subtype of "_LoadSparseFrame")
def __new__(cls) -> "DataFrame": # type: ignore[misc]
from pandas import DataFrame
warnings.warn(
_sparse_msg.format(cls="SparseDataFrame", new="DataFrame"),
FutureWarning,
stacklevel=6,
)
return DataFrame()
# If classes are moved, provide compat here.
_class_locations_map = {
("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
# 15477
("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
# 10890
("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
("pandas.sparse.series", "SparseTimeSeries"): (
"pandas.core.sparse.series",
"SparseSeries",
),
# 12588, extensions moving
("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
# 18543 moving period
("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
("pandas.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
("pandas._libs.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
# 15998 top-level dirs moving
("pandas.sparse.array", "SparseArray"): (
"pandas.core.arrays.sparse",
"SparseArray",
),
("pandas.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.sparse.frame", "SparseDataFrame"): (
"pandas.core.sparse.frame",
"_LoadSparseFrame",
),
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
("pandas.indexes.numeric", "Int64Index"): (
"pandas.core.indexes.numeric",
"Int64Index",
),
("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
("pandas.tseries.index", "_new_DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"_new_DatetimeIndex",
),
("pandas.tseries.index", "DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"DatetimeIndex",
),
("pandas.tseries.period", "PeriodIndex"): (
"pandas.core.indexes.period",
"PeriodIndex",
),
# 19269, arrays moving
("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
# 19939, add timedeltaindex, float64index compat from 15998 move
("pandas.tseries.tdi", "TimedeltaIndex"): (
"pandas.core.indexes.timedeltas",
"TimedeltaIndex",
),
("pandas.indexes.numeric", "Float64Index"): (
"pandas.core.indexes.numeric",
"Float64Index",
),
("pandas.core.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.core.sparse.frame", "SparseDataFrame"): (
"pandas.compat.pickle_compat",
"_LoadSparseFrame",
),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat and uses a non-public class of the pickle module.
# error: Name 'pkl._Unpickler' is not defined
class Unpickler(pkl._Unpickler): # type: ignore[name-defined]
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super().find_class(module, name)
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
pass
def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
"""
Load a pickle, with a provided encoding,
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
def loads(
bytes_object: bytes,
*,
fix_imports: bool = True,
encoding: str = "ASCII",
errors: str = "strict",
):
"""
Analogous to pickle._loads.
"""
fd = io.BytesIO(bytes_object)
return Unpickler(
fd, fix_imports=fix_imports, encoding=encoding, errors=errors
).load()
@contextlib.contextmanager
def patch_pickle():
"""
Temporarily patch pickle to use our unpickler.
"""
orig_loads = pkl.loads
try:
setattr(pkl, "loads", loads)
yield
finally:
setattr(pkl, "loads", orig_loads)
| bsd-3-clause |
yorkerlin/shogun | examples/undocumented/python_modular/graphical/interactive_clustering_demo.py | 10 | 11280 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'o', color='0.7')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'o', color='0.5')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
if event.button==1:
label = 1.0
else:
label = -1.0
self.data.add_example(event.xdata, event.ydata, label)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
self.k.setEnabled(True)
def train_svm(self):
k = int(self.k.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ko')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'ko')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = BinaryLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
distance_name = self.distance_combo.currentText()
if distance_name == "EuclideanDistance":
distance=EuclideanDistance(train, train)
elif distance_name == "ManhattanMetric":
distance=ManhattanMetric(train, train)
elif distance_name == "JensenMetric":
distance=JensenMetric(train, train)
kmeans=KMeans(k, distance)
kmeans.train()
centers = kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
self.axes.plot(features[0,labels==+1], features[1,labels==+1],'ro')
self.axes.plot(features[0,labels==-1], features[1,labels==-1],'bo')
for i in xrange(k):
self.axes.plot(centers[0,i],centers[1,i],'kx', markersize=20, linewidth=5)
t = numpy.linspace(0, 2*numpy.pi, 100)
self.axes.plot(radi[i]*numpy.cos(t)+centers[0,i],radi[i]*numpy.sin(t)+centers[1,i],'k-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
self.canvas.draw()
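    # Note (added): despite its name, train_svm() performs KMeans clustering:
    # it wraps the drawn points in RealFeatures, builds the selected shogun
    # distance, runs KMeans(k, distance), and draws each centre together with
    # a circle of the reported cluster radius.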
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
k_label = QLabel('Number of Clusters')
self.k = QLineEdit()
self.k.setText("2")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(k_label)
spins_hbox.addWidget(self.k)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Cluster!")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.distance_combo = QComboBox()
self.distance_combo.insertItem(-1, "EuclideanDistance")
self.distance_combo.insertItem(-1, "ManhattanMetric")
self.distance_combo.insertItem(-1, "JensenMetric")
self.distance_combo.maximumSize = QSize(300, 50)
self.connect(self.distance_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.distance_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1_pos = []
self.x2_pos = []
self.x1_neg = []
self.x2_neg = []
def get_stats(self):
num_neg = len(self.x1_neg)
num_pos = len(self.x1_pos)
str_neg = "num negative examples: %i" % num_neg
str_pos = "num positive examples: %i" % num_pos
return (str_neg, str_pos)
def get_labels(self):
return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
def get_examples(self):
num_pos = len(self.x1_pos)
num_neg = len(self.x1_neg)
examples = numpy.zeros((2,num_pos+num_neg))
for i in xrange(num_pos):
examples[0,i] = self.x1_pos[i]
examples[1,i] = self.x2_pos[i]
for i in xrange(num_neg):
examples[0,i+num_pos] = self.x1_neg[i]
examples[1,i+num_pos] = self.x2_neg[i]
return examples
def add_example(self, x1, x2, label):
if label==1:
self.x1_pos.append(x1)
self.x2_pos.append(x2)
else:
self.x1_neg.append(x1)
self.x2_neg.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| gpl-3.0 |
barentsen/dave | detrendThis/martinsff.py | 1 | 8302 | import sys, os
import math
import numpy
import numpy as np
import scipy
from pylab import *
from matplotlib import *
from scipy.stats import *
from numpy import *
from scipy import *
import kepfit
import kepmsg
"""
This code is based on the PyKE routine kepsff
found at keplerscience.arc.nasa.gov
The kepsff code is based on Vanderberg and Johnson 2014.
If you use this you must cite V&J 2014.
"""
def martinsff(intime,indata,centr1,centr2,
npoly_cxcy,sigma_cxcy,npoly_ardx,
npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,verbose,logfile,
status):
# startup parameters
status = 0
labelsize = 16
ticksize = 14
xsize = 20
ysize = 8
lcolor = '#0000ff'
lwidth = 1.0
fcolor = '#ffff00'
falpha = 0.2
seterr(all="ignore")
# fit centroid data with low-order polynomial
cfit = zeros((len(centr2)))
csig = zeros((len(centr2)))
functype = 'poly' + str(npoly_cxcy)
pinit = array([nanmean(centr2)])
if npoly_cxcy > 0:
for j in range(npoly_cxcy):
pinit = append(pinit,0.0)
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
for j in range(len(coeffs)):
cfit += coeffs[j] * numpy.power(centr1,j)
csig[:] = sigma
except:
message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2)
status = kepmsg.err(logfile,message,verbose)
# sys.exit('')
os._exit(1)
# reject outliers
time_good = array([],'float64')
centr1_good = array([],'float32')
centr2_good = array([],'float32')
flux_good = array([],'float32')
cad_good = array([],dtype=bool)
for i in range(len(cfit)):
if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
cad_good = append(cad_good, True)
time_good = append(time_good,intime[i])
centr1_good = append(centr1_good,centr1[i])
centr2_good = append(centr2_good,centr2[i])
flux_good = append(flux_good,indata[i])
else:
#import ipdb
#ipdb.set_trace()
cad_good = append(cad_good, False)
print(intime[i])
# covariance matrix for centroid time series
centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)])
covar = cov(centr)
# eigenvector eigenvalues of covariance matrix
[eval, evec] = numpy.linalg.eigh(covar)
ex = arange(-10.0,10.0,0.1)
epar = evec[1,1] / evec[0,1] * ex
enor = evec[1,0] / evec[0,0] * ex
ex = ex + mean(centr1)
epar = epar + mean(centr2_good)
enor = enor + mean(centr2_good)
# rotate centroid data
centr_rot = dot(evec.T,centr)
# fit polynomial to rotated centroids
rfit = zeros((len(centr2)))
rsig = zeros((len(centr2)))
functype = 'poly' + str(npoly_ardx)
pinit = array([nanmean(centr_rot[0,:])])
pinit = array([1.0])
if npoly_ardx > 0:
for j in range(npoly_ardx):
pinit = append(pinit,0.0)
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
ry = zeros((len(rx)))
for i in range(len(coeffs)):
ry = ry + coeffs[i] * numpy.power(rx,i)
# calculate arclength of centroids
s = zeros((len(rx)))
for i in range(1,len(s)):
work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2
s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])
# fit arclength as a function of strongest eigenvector
sfit = zeros((len(centr2)))
ssig = zeros((len(centr2)))
functype = 'poly' + str(npoly_ardx)
pinit = array([nanmean(s)])
if npoly_ardx > 0:
for j in range(npoly_ardx):
pinit = append(pinit,0.0)
try:
acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
# correlate arclength with detrended flux
t = copy(time_good)
y = copy(flux_good)
z = centr_rot[1,:]
x = zeros((len(z)))
for i in range(len(acoeffs)):
x = x + acoeffs[i] * numpy.power(z,i)
# calculate time derivative of arclength s
dx = zeros((len(x)))
for i in range(1,len(x)):
dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
dx[0] = dx[1]
# fit polynomial to derivative and flag outliers (thruster firings)
dfit = zeros((len(dx)))
dsig = zeros((len(dx)))
functype = 'poly' + str(npoly_dsdt)
pinit = array([nanmean(dx)])
if npoly_dsdt > 0:
for j in range(npoly_dsdt):
pinit = append(pinit,0.0)
try:
dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
for i in range(len(dcoeffs)):
dfit = dfit + dcoeffs[i] * numpy.power(t,i)
centr1_pnt = array([],'float32')
centr2_pnt = array([],'float32')
time_pnt = array([],'float64')
flux_pnt = array([],'float32')
dx_pnt = array([],'float32')
s_pnt = array([],'float32')
time_thr = array([],'float64')
flux_thr = array([],'float32')
dx_thr = array([],'float32')
thr_cadence = zeros(len(t),dtype=bool)
for i in range(len(t)):
if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
time_pnt = append(time_pnt,time_good[i])
flux_pnt = append(flux_pnt,flux_good[i])
dx_pnt = append(dx_pnt,dx[i])
s_pnt = append(s_pnt,x[i])
centr1_pnt = append(centr1_pnt,centr1_good[i])
centr2_pnt = append(centr2_pnt,centr2_good[i])
else:
time_thr = append(time_thr,time_good[i])
flux_thr = append(flux_thr,flux_good[i])
dx_thr = append(dx_thr,dx[i])
thr_cadence[i] = True
# fit arclength-flux correlation
cfit = zeros((len(time_pnt)))
csig = zeros((len(time_pnt)))
functype = 'poly' + str(npoly_arfl)
pinit = array([nanmean(flux_pnt)])
if npoly_arfl > 0:
for j in range(npoly_arfl):
pinit = append(pinit,0.0)
try:
ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
# correction factors for unfiltered data
centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
centr_rot = dot(evec.T,centr)
yy = copy(indata)
zz = centr_rot[1,:]
xx = zeros((len(zz)))
cfac = zeros((len(zz)))
for i in range(len(acoeffs)):
xx = xx + acoeffs[i] * numpy.power(zz,i)
for i in range(len(ccoeffs)):
cfac = cfac + ccoeffs[i] * numpy.power(xx,i)
# apply correction to flux time-series
out_detsap = indata / cfac
#add back in the missing thr_cadence data
new_thr = np.zeros_like(cad_good)
j = 0
if np.all(cad_good == True):
pass
else:
for i,c in enumerate(cad_good):
if c == False:
j+=1
else:
new_thr[i] = thr_cadence[i-j]
return out_detsap, cfac, new_thr
| mit |
beepee14/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
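# Usage sketch (illustrative):
#
#   from sklearn.manifold import Isomap
#   embedding = Isomap(n_neighbors=10, n_components=2).fit_transform(X)
#
# where X is an (n_samples, n_features) array.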
| bsd-3-clause |
jakevdp/megaman | megaman/utils/covar_plotter.py | 4 | 2672 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def plot_point_cov(points, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma ellipse based on the mean and covariance of a point
"cloud" (points, an Nx2 array).
Parameters
----------
points : An Nx2 array of the data points.
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
if __name__ == '__main__':
#-- Example usage -----------------------
# Generate some random, correlated data
points = np.random.multivariate_normal(
mean=(1,1), cov=[[0.4, 9],[9, 10]], size=1000
)
# Plot the raw points...
x, y = points.T
plt.plot(x, y, 'ro')
# Plot a transparent 3 standard deviation covariance ellipse
plot_point_cov(points, nstd=3, alpha=0.5, color='green')
plt.show()
| bsd-2-clause |
stevenzhang18/Indeed-Flask | lib/pandas/core/nanops.py | 9 | 23144 | import itertools
import functools
import numpy as np
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
ensure_float, _ensure_float64,
_ensure_int64, _ensure_object,
is_float, is_integer, is_complex,
is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype, _get_dtype,
is_int_or_datetime_dtype, is_any_int_dtype,
_int64_max)
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
try:
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e)
raise
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
# wrap the 0's if needed
if is_timedelta64_dtype(values):
return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
try:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(values):
raise TypeError(e)
raise
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and
not is_datetime_or_timedelta_dtype(dt)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError) as e:
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return _int64_max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = _maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isnull(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = lib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
dtype_count = np.float64
if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if not is_float_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return _wrap_results(np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
dtype = _get_dtype(dtype)
count = _get_counts(mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
dtype = values.dtype
mask = isnull(values)
if is_any_int_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return _wrap_results(result, values.dtype)
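# Illustrative sketch only (not part of the pandas API): the two-pass variance used
# above stays accurate where the one-pass textbook formula E[x^2] - E[x]^2 suffers
# catastrophic cancellation; the offset and values below are made up.
def _two_pass_variance_sketch():
    x = np.array([4.0, 7.0, 13.0, 16.0]) + 1e9
    one_pass = (x ** 2).mean() - x.mean() ** 2   # precision loss at this magnitude
    two_pass = ((x - x.mean()) ** 2).mean()      # matches np.var(x) == 22.5
    return one_pass, two_pass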
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch()
def reduction(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(
values,
skipna,
fill_value_typ=fill_value_typ,
)
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except:
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
reduction.__name__ = 'nan' + meth
return reduction
nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8','m8')
def nanskew(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
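    # A is the mean, B the second central moment (variance) and C the third central
    # moment, so the expression below is the adjusted Fisher-Pearson skewness
    # sqrt(n*(n-1)) * m3 / ((n - 2) * m2 ** 1.5).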
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt(count * count - count) * C) /
((count - typ(2)) * np.sqrt(B) ** typ(3)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8','m8')
def nankurt(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
D = (values ** 4).sum(axis) / count - A ** typ(4) - typ(6) * B * A * A - typ(4) * C * A
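    # As in nanskew, A is the mean and B, C, D the second, third and fourth central
    # moments; the expression for `result` below is the bias-adjusted sample excess
    # kurtosis.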
B = _zero_out_fperr(B)
D = _zero_out_fperr(D)
if not isinstance(B, np.ndarray):
# if B is a scalar, check these corner cases first before doing division
if count < 4:
return np.nan
if B == 0:
return 0
result = (((count * count - typ(1)) * D / (B * B) - typ(3) * ((count - typ(1)) ** typ(2))) /
((count - typ(2)) * (count - typ(3))))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow('M8','m8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
dtype = _get_dtype(dtype)
if axis is None:
return dtype.type(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
if np.isscalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(result, axis, mask):
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
elif result is not tslib.NaT:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8','m8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8','m8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
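# Small illustrative sketch (the arrays are made up): the nan* comparisons defined
# above return object arrays with np.nan wherever either operand is null.
def _nancomp_sketch():
    left = np.array([1.0, np.nan, 3.0])
    right = np.array([2.0, 2.0, 2.0])
    return nangt(left, right)  # array([False, nan, True], dtype=object)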
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(_ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.timedelta64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_basic.py | 7 | 1290 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from nose.tools import assert_equal
from matplotlib.testing.decorators import knownfailureif
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
OshynSong/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
pratapvardhan/pandas | pandas/io/formats/printing.py | 6 | 13133 | """
printing tools
"""
import sys
from pandas.core.dtypes.inference import is_sequence
from pandas import compat
from pandas.compat import u
from pandas.core.config import get_option
def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
    Parameters
    ----------
space : int
number of spaces for padding
lists : str
        list of str which are being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
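# Tiny illustrative example (the strings are made up): each column is left-justified
# to its widest entry plus `space`, and rows are glued together with newlines.
def _adjoin_sketch():
    return adjoin(2, ['pandas', 'numpy'], ['0.23', '1.14'])
    # -> 'pandas  0.23\nnumpy   1.14'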
def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("{{{body}}}")
else:
fmt = u("[{body}]") if hasattr(seq, '__setitem__') else u("({body})")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(
next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt.format(body=body)
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
"""
fmt = u("{{{things}}}")
pairs = []
pfmt = u("{key}: {val}")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs))
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
    It properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = u("'{thing}'")
else:
fmt = u("u'{thing}'")
result = fmt.format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
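# Minimal usage sketch (the sample data is made up): use pprint_thing for unicode
# text kept in memory and pprint_thing_encoded when the result must be written out
# as bytes, as described in the notes above.
def _pprint_usage_sketch():
    nested = {'ints': [1, 2, 3], 'text': u'caf\xe9'}
    text = pprint_thing(nested, quote_strings=True)       # always unicode
    raw = pprint_thing_encoded(nested, encoding='utf-8')  # encoded bytes for I/O
    return text, raw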
def _enable_data_resource_formatter(enable):
if 'IPython' not in sys.modules:
# definitely not in IPython
return
from IPython import get_ipython
ip = get_ipython()
if ip is None:
# still not in IPython
return
formatters = ip.display_formatter.formatters
mimetype = "application/vnd.dataresource+json"
if enable:
if mimetype not in formatters:
# define tableschema formatter
from IPython.core.formatters import BaseFormatter
class TableSchemaFormatter(BaseFormatter):
print_method = '_repr_data_resource_'
_return_type = (dict,)
# register it:
formatters[mimetype] = TableSchemaFormatter()
# enable it if it's been disabled:
formatters[mimetype].enabled = True
else:
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
def format_object_summary(obj, formatter, is_justify=True, name=None):
"""
Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
    name : str, optional
defaults to the class name of the obj
Returns
-------
summary string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
if name is None:
name = obj.__class__.__name__
space1 = "\n%s" % (' ' * (len(name) + 1))
space2 = "\n%s" % (' ' * (len(name) + 2))
n = len(obj)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max(adj.len(x) for x in values)
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(obj[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(obj[0])
last = formatter(obj[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in obj[:n]]
tail = [formatter(x) for x in obj[-n:]]
else:
head = []
tail = [formatter(x) for x in obj]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def format_object_attrs(obj):
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
must be iterable
Returns
-------
list
"""
attrs = []
if hasattr(obj, 'dtype'):
attrs.append(('dtype', "'{}'".format(obj.dtype)))
if getattr(obj, 'name', None) is not None:
attrs.append(('name', default_pprint(obj.name)))
max_seq_items = get_option('display.max_seq_items') or len(obj)
if len(obj) > max_seq_items:
attrs.append(('length', len(obj)))
return attrs
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/feature_selection/rfe.py | 1 | 14237 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_arrays, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
`n_features_` : int
The number of selected features.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_arrays(X, y, sparse_format="csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(self.step * n_features)
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
`n_features_` : int
The number of selected features with cross-validation.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
`grid_scores_` : array of shape [n_subsets_of_features]
The cross-validation scores such that
`grid_scores_[i]` corresponds to
the CV score of the i-th subset of features.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
loss_func=None, estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.loss_func = loss_func
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_arrays(X, y, sparse_format="csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring,
loss_func=self.loss_func)
scores = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, mask], y_train)
score = _score(estimator, X_test[:, mask], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k + 1, max(ranking_), score))
scores[k] += score
# Pick the best number of features on average
k = np.argmax(scores)
best_score = scores[k]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=k+1,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| apache-2.0 |
lihongchun2007/SeeNN | heatmap/class_hot.py | 1 | 4056 | #!/usr/bin/env python2
import os
import sys
import pickle
import argparse
import tensorflow as tf
import numpy as np
from scipy.misc import imread, imresize
from matplotlib import pyplot as plt
def download_vgg16():
from six.moves import urllib
model_files = ['https://www.cs.toronto.edu/~frossard/vgg16/vgg16.py',
'https://www.cs.toronto.edu/~frossard/vgg16/imagenet_classes.py',
'https://www.cs.toronto.edu/~frossard/vgg16/vgg16_weights.npz'
]
files = [os.path.split(url)[-1] for url in model_files]
filename = ''
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, min(1., float(count * block_size) / float(total_size)) * 100.0))
sys.stdout.flush()
for index, url in enumerate(model_files):
if not os.path.exists(files[index]):
filename = files[index]
filepath, _ = urllib.request.urlretrieve(url, filename, _progress)
print ''
try:
from vgg16 import vgg16
from imagenet_classes import class_names
except:
download_vgg16()
from vgg16 import vgg16
from imagenet_classes import class_names
class classifier:
def __init__(self):
self.sess = tf.Session()
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
self.model = vgg16(images, 'vgg16_weights.npz', self.sess)
self.class_names = class_names
self.class_name_indices = {n:i for i, n in enumerate(class_names)}
def predict(self, img, class_name):
prob = self.sess.run(self.model.probs, feed_dict={self.model.imgs:[img,]})[0]
return prob[self.class_name_indices[class_name]]
def predict_class(self, img):
prob = self.sess.run(self.model.probs, feed_dict={self.model.imgs:[img,]})[0]
        # top-1 prediction (index of the highest softmax probability)
        preds = np.argmax(prob)
return self.class_names[preds], prob[preds], preds
def hot_image(image, model, class_name, window_size=50, step=25):
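    # Occlusion-sensitivity map: slide a window_size x window_size black patch over
    # the image in steps of `step` pixels, re-run the classifier on each occluded
    # copy and record the probability of `class_name`; regions whose occlusion
    # lowers that probability the most contribute most to the classification.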
width, height, _ = image.shape
half_size = window_size // 2
hot_image = np.zeros((width//step + 1, height//step + 1))
i = 0
for w in range(0, width, step):
lx = max(0, w - half_size)
mx = min(width, w + half_size)
print w
j = 0
for h in range(0, height, step):
ly = max(0, h - half_size)
my = min(height, h + half_size)
mask_image = image.copy()
mask_image[lx:mx, ly:my, :] = 0
hot_image[i, j] = model.predict(mask_image, class_name)
j += 1
i += 1
hot_image = imresize(hot_image, image.shape)
return hot_image
if __name__ == '__main__':
description='Calculate how much each part of an image contributes to object classification'
argparser = argparse.ArgumentParser(description=description)
argparser.add_argument('image', help='file name of an image')
args = argparser.parse_args()
image_file_name = args.image
if image_file_name.endswith('pkl'):
img = pickle.load(open(image_file_name, 'rb'))
#img = imresize(img[0, :, :, :], (224, 224))
img = img[0, :, :, :]
#img[:, :, :] = 0
else:
img = imread(image_file_name, mode='RGB')
img = imresize(img, (224, 224))
model = classifier()
cls, prob, preds = model.predict_class(img)
print '##:', preds, cls, prob
hot_img = hot_image(img, model, cls)
hot_img = prob - hot_img
hot_img = (hot_img - hot_img.min())/(hot_img.max() - hot_img.min())
plt.imshow(img)
im = plt.imshow(hot_img, cmap='hot', interpolation='bilinear', alpha=0.6)
plt.colorbar(im)
if image_file_name.endswith('pkl'):
out_file_name = 'hot_' + os.path.split(image_file_name)[-1] + '.png'
else:
out_file_name = 'hot_' + os.path.split(image_file_name)[-1]
plt.savefig(out_file_name)
try:
plt.show()
except:
print 'Cannot show image!'
| gpl-3.0 |
andrewfullard/python-meet | exercise8.py | 1 | 2207 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 09:10:22 2013
@author: c0918140
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interpolate
def f(x):
y = -np.exp(x)*(-2. + 2.*x + 5.*x**2. + x**3.)
return y
def R(r):
y = -r**2./(1 + r**6)
return y
def solver(N):
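    # Finite-difference solve of u''(x) = f(x) on [0, 1]: assemble the tridiagonal
    # system (sub-diagonal A, diagonal B, super-diagonal C, right-hand side D, with
    # the boundary conditions folded into B[0] and B[N+1]) and solve it with the
    # Thomas algorithm: a forward sweep building Cprime/Dprime, then back substitution.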
dx = 1./(N + 2)
xspace = np.linspace(0. - dx, 1. + dx, N + 2)
x = np.linspace(0., 1., N)
A = 1./(np.ones(N + 2)*dx*dx)
B = -2./(np.ones(N + 2)*dx*dx)
B[0] = -1./(dx*dx)
B[N+1] = -3./(dx*dx)
C = 1./(np.ones(N + 2)*dx*dx)
Cprime = np.zeros(N + 2)
D = f(xspace)
Dprime = np.zeros(N + 2)
u = np.zeros(N + 2)
Cprime[1] = C[1]/B[1]
Dprime[1] = D[1]/B[1]
i = 2
while i < N:
Cprime[i] = C[i]/(B[i] - (Cprime[i - 1]*A[i]))
Dprime[i] = (D[i] - Dprime[i - 1]*A[i])/(B[i] - Cprime[i - 1]*A[i])
i += 1
i = N
while i > 0:
u[i] = Dprime[i] - Cprime[i]*u[i+1]
i -= 1
return x, u[1:N+1]
def solver_elliptic(N):
dx = 1./(N + 2)
xspace = np.linspace(0. - dx, 1. + dx, N + 2)
x = np.linspace(0. + dx, 1. - dx, N)
A = 1./(np.ones(N + 2)*dx*dx)
B = -2./(np.ones(N + 2)*dx*dx)
B[0] = -1./(dx*dx)
B[N+1] = -3./(dx*dx)
C = 1./(np.ones(N + 2)*dx*dx)
Cprime = np.zeros(N + 2)
D = R(xspace)
Dprime = np.zeros(N + 2)
u = np.zeros(N + 2)
Cprime[1] = C[1]/B[1]
Dprime[1] = D[1]/B[1]
i = 2
while i < N:
Cprime[i] = C[i]/(B[i] - (Cprime[i - 1]*A[i]))
Dprime[i] = (D[i] - Dprime[i - 1]*A[i])/(B[i] - Cprime[i - 1]*A[i])
i += 1
i = N
while i > 0:
u[i] = Dprime[i] - Cprime[i]*u[i+1]
i -= 1
return x, u[1:N+1]
x1, u1 = solver(100)
x2, u2 = solver(200)
u3 = interpolate.interp1d(x2,u2,'cubic')
u3c = u3(x1)
conv = u3c - u1
plt.figure("Convergence")
plt.plot(x1, conv)
plt.figure()
plt.plot(x1, u1, label="100 points")
plt.plot(x2, u2, label="200 points")
plt.title("Convergence test")
plt.legend(loc = 0)
| mit |
rubydatasystems/seniority_list | seniority_list/interactive_plotting.py | 1 | 9185 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
.. module:: interactive_plotting
:synopsis: The bokeh module contains interactive plotting functions.
.. moduleauthor:: Bob Davison <rubydatasystems@fastmail.net>
'''
from bokeh.plotting import figure, ColumnDataSource
# from bokeh.models import (HoverTool, BoxZoomTool, WheelZoomTool, ResetTool,
# PanTool, SaveTool, UndoTool, RedoTool)
from bokeh.models import NumeralTickFormatter, Range1d, Label
from bokeh.models.widgets import Slider, Button, Select
from bokeh.layouts import column, row, widgetbox
from bokeh.models.layouts import Spacer
import numpy as np
import pandas as pd
'''
TODO:
add stacked area for cat_order
test source.date update using groupby groups/precalculated ColumnDataSources
add size, alpha sliders
make tabs for right side controls
background color selection, alpha control
add datatable
add save underlying data (reports?)
add mark selected employees
add dataset selection
add diff comparison
add hover (with user selection)
add tools (crosshair, etc)
add dataset selection
add dataset group compare
add dataset employee compare
add ret_only
add other chart types
make this the only display??
add persist df
'''
def bk_basic_interactive(doc, df=None,
plot_height=700, plot_width=900,
dot_size=5):
'''run a basic interactive chart as a server app - powered by the bokeh
plotting library. Run the app in the jupyter notebook as follows:
.. code:: python
from functools import partial
import pandas as pd
import interactive_plotting as ip
from bokeh.io import show, output_notebook
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
output_notebook()
proposal = 'p1'
df = pd.read_pickle('dill/ds_' + proposal + '.pkl')
handler = FunctionHandler(partial(ip.bk_basic_interactive, df=df))
app = Application(handler)
show(app)
inputs
doc (required input)
do not change this input
df (dataframe)
calculated dataset input, this is a required input
plot_height (integer)
height of plot in pixels
plot_width (integer)
width of plot in pixels
Add plot_height and/or plot_width parameters as kwargs within the partial
method:
.. code:: python
handler = FunctionHandler(partial(ip.bk_basic_interactive,
df=df,
plot_height=450,
plot_width=625))
Note: the "df" argument is not optional, a valid dataset variable must
be assigned.
'''
class CallbackID():
def __init__(self, identifier):
self.identifier = identifier
max_month = df['mnum'].max()
# set up color column
egs = df['eg'].values
sdict = pd.read_pickle('dill/dict_settings.pkl')
cdict = pd.read_pickle('dill/dict_color.pkl')
eg_cdict = cdict['eg_color_dict']
clr = np.empty(len(df), dtype='object')
for eg in eg_cdict.keys():
np.put(clr, np.where(egs == eg)[0], eg_cdict[eg])
df['c'] = clr
df['a'] = .7
df['s'] = dot_size
# date list for animation label background
date_list = list(pd.date_range(start=sdict['starting_date'],
periods=max_month, freq='M'))
date_list = [x.strftime('%Y %b') for x in date_list]
slider_height = plot_height - 200
# create empty data source template
source = ColumnDataSource(data=dict(x=[], y=[], c=[], s=[], a=[]))
slider_month = Slider(start=0, end=max_month,
value=0, step=1,
title='month',
height=slider_height,
width=15,
tooltips=False,
bar_color='#ffe6cc',
direction='rtl',
orientation='vertical',)
display_attrs = ['age', 'jobp', 'cat_order', 'spcnt', 'lspcnt',
'jnum', 'mpay', 'cpay', 'snum', 'lnum',
'ylong', 'mlong', 'idx', 'retdate', 'ldate',
'doh', 's_lmonths', 'new_order']
sel_x = Select(options=display_attrs,
value='age',
title='x axis attribute:',
width=115, height=45)
sel_y = Select(options=display_attrs,
value='spcnt',
title='y axis attribute:',
width=115, height=45)
label = Label(x=20, y=plot_height - 150,
x_units='screen', y_units='screen',
text='', text_alpha=.25,
text_color='#b3b3b3',
text_font_size='70pt')
spacer1 = Spacer(height=plot_height, width=30)
but_fwd = Button(label='FWD', width=60)
but_back = Button(label='BACK', width=60)
add_sub = widgetbox(but_fwd, but_back, height=50, width=30)
def make_plot():
this_df = get_df()
xcol = sel_x.value
ycol = sel_y.value
source.data = dict(x=this_df[sel_x.value],
y=this_df[sel_y.value],
c=this_df['c'],
a=this_df['a'],
s=this_df['s'])
non_invert = ['age', 'idx', 's_lmonths', 'mlong',
'ylong', 'cpay', 'mpay']
if xcol in non_invert:
xrng = Range1d(df[xcol].min(), df[xcol].max())
else:
xrng = Range1d(df[xcol].max(), df[xcol].min())
if ycol in non_invert:
yrng = Range1d(df[ycol].min(), df[ycol].max())
else:
yrng = Range1d(df[ycol].max(), df[ycol].min())
p = figure(plot_width=plot_width,
plot_height=plot_height,
x_range=xrng,
y_range=yrng,
title='')
p.circle(x='x', y='y', color='c', size='s', alpha='a',
line_color=None, source=source)
pcnt_cols = ['spcnt', 'lspcnt']
if xcol in pcnt_cols:
p.x_range.end = -.001
p.xaxis[0].formatter = NumeralTickFormatter(format="0.0%")
if ycol in pcnt_cols:
p.y_range.end = -.001
p.yaxis[0].formatter = NumeralTickFormatter(format="0.0%")
if xcol in ['cat_order']:
p.x_range.end = -50
if ycol in ['cat_order']:
p.y_range.end = -50
if xcol in ['jobp', 'jnum']:
p.x_range.end = .95
if ycol in ['jobp', 'jnum']:
p.y_range.end = .95
p.xaxis.axis_label = sel_x.value
p.yaxis.axis_label = sel_y.value
p.add_layout(label)
label.text = date_list[slider_month.value]
return p
def get_df():
filter_df = df[df.mnum == slider_month.value][[sel_x.value,
sel_y.value,
'c', 's', 'a']]
return filter_df
def update_data(attr, old, new):
this_df = get_df()
source.data = dict(x=this_df[sel_x.value],
y=this_df[sel_y.value],
c=this_df['c'],
a=this_df['a'],
s=this_df['s'])
label.text = date_list[new]
controls = [sel_x, sel_y]
wb_controls = [sel_x, sel_y, slider_month]
for control in controls:
control.on_change('value', lambda attr, old, new: insert_plot())
slider_month.on_change('value', update_data)
sizing_mode = 'fixed'
inputs = widgetbox(*wb_controls, width=190, height=60,
sizing_mode=sizing_mode)
def insert_plot():
lo.children[0] = make_plot()
def animate_update():
mth = slider_month.value + 1
if mth > max_month:
mth = 0
slider_month.value = mth
def fwd():
slider_val = slider_month.value
if slider_val < max_month:
slider_month.value = slider_val + 1
def back():
slider_val = slider_month.value
if slider_val > 0:
slider_month.value = slider_val - 1
but_back.on_click(back)
but_fwd.on_click(fwd)
cb = CallbackID(None)
def animate():
if play_button.label == '► Play':
play_button.label = '❚❚ Pause'
cb.identifier = doc.add_periodic_callback(animate_update, 350)
else:
play_button.label = '► Play'
doc.remove_periodic_callback(cb.identifier)
def reset():
slider_month.value = 0
play_button = Button(label='► Play', width=60)
play_button.on_click(animate)
reset_button = Button(label='Reset', width=60)
reset_button.on_click(reset)
lo = row(make_plot(), spacer1, inputs, column(play_button,
reset_button,
add_sub))
doc.add_root(lo)
| gpl-3.0 |
piersy/theano-machine-learning | 2_logistic_regression.py | 1 | 1696 | import matplotlib.pyplot as plt
import theano
from theano import tensor as T
import numpy as np
from load import mnist
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def model(X, w):
return T.nnet.softmax(T.dot(X, w))
trX, teX, trY, teY = mnist(onehot=True)
X = T.fmatrix()
Y = T.fmatrix()
# 10 output classes; 784 is the input vector size (sqrt(784) = 28, the MNIST image side length)
w = init_weights((784, 10))
py_x = model(X, w)
y_pred = T.argmax(py_x, axis=1)
# categorical_crossentropy measures the mismatch between the predicted class probabilities py_x and the true one-hot labels Y; minimizing its mean pushes probability onto the correct class
cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
gradient = T.grad(cost=cost, wrt=w)
update = [[w, w - gradient * 0.05]]
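# Plain stochastic gradient descent on mini-batches: with one-hot labels the cost above reduces to
# the mean of -log(py_x[correct class]) over the batch, and each update moves the weights against
# the gradient with a fixed learning rate, roughly:
#   w_new = w_old - 0.05 * d(cost)/d(w)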
train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
# print "start:"+str(start)
# print "end:"+str(end)
cost = train(trX[start:end], trY[start:end])
print i, np.mean(np.argmax(teY, axis=1) == predict(teX))
# visualize the learned weights: one 28x28 image per digit class
plt.figure(figsize=(10, 1))
for i in range(10):
    sub = plt.subplot(1, 10, i + 1)
t = w.get_value()[:, i].reshape((28, 28))
sub.axis("off")
sub.imshow(t, cmap=plt.cm.gray, interpolation="nearest")
plt.show()
# sub.imshow(true_face.reshape(image_shape),
# cmap=plt.cm.gray,
# interpolation="nearest")
# f = open('ramp.png', 'wb') # binary mode is important
| mit |
manterd/myPhyloDB | functions/analysis/pca_graphs.py | 1 | 28862 | import datetime
from django.http import HttpResponse
import logging
import numpy as np
import pandas as pd
from pyper import *
import json
import functions
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
def getPCA(request, stops, RID, PID):
try:
while True:
if request.is_ajax():
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 5: Reading normalized data file...')
                functions.setBase(RID, 'Step 2 of 5: Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
method = all["Method"]
scale = all['scaled']
constrain = all["constrain"]
PC1 = int(all["PC1"])
PC2 = int(all["PC2"])
result = ''
treeType = int(all['treeType'])
if treeType == 1:
if selectAll == 1:
result += 'Taxa level: Kingdom' + '\n'
elif selectAll == 2:
result += 'Taxa level: Phyla' + '\n'
elif selectAll == 3:
result += 'Taxa level: Class' + '\n'
elif selectAll == 4:
result += 'Taxa level: Order' + '\n'
elif selectAll == 5:
result += 'Taxa level: Family' + '\n'
elif selectAll == 6:
result += 'Taxa level: Genus' + '\n'
elif selectAll == 7:
result += 'Taxa level: Species' + '\n'
elif selectAll == 9:
result += 'Taxa level: OTU_99' + '\n'
elif treeType == 2:
if keggAll == 1:
result += 'KEGG Pathway level: 1' + '\n'
elif keggAll == 2:
result += 'KEGG Pathway level: 2' + '\n'
elif keggAll == 3:
result += 'KEGG Pathway level: 3' + '\n'
elif treeType == 3:
if nzAll == 1:
result += 'KEGG Enzyme level: 1' + '\n'
elif nzAll == 2:
result += 'KEGG Enzyme level: 2' + '\n'
elif nzAll == 3:
result += 'KEGG Enzyme level: 3' + '\n'
elif nzAll == 4:
result += 'KEGG Enzyme level: 4' + '\n'
                    elif nzAll == 5:
                        result += 'KEGG Enzyme level: GIBBs' + '\n'
                    elif nzAll == 6:
                        result += 'KEGG Enzyme level: Nitrogen cycle' + '\n'
# Select samples and meta-variables from savedDF
metaValsCat = all['metaValsCat']
metaIDsCat = all['metaIDsCat']
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar)
allFields = catFields + quantFields
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 2 of 5: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 5: Selecting your chosen taxa or KEGG level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
mapTaxa = 'no'
finalDF = pd.DataFrame()
if treeType == 1:
if selectAll != 8:
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remZeroes, perZeroes, filterData, filterPer, filterMeth)
else:
filteredDF = savedDF.copy()
finalDF, missingList = functions.getTaxaDF(selectAll, '', filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
finalDF, allDF = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
finalDF, allDF = functions.getNZDF(nzAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if finalDF.empty:
error = "Selected taxa were not found in your selected samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
# make sure column types are correct
finalDF[catFields] = finalDF[catFields].astype(str)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/pca/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
# STOP SHARED
# START STATSGRAPH
count_rDF = pd.DataFrame()
if DepVar == 0:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund')
elif DepVar == 1:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rel_abund')
elif DepVar == 2:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rich')
elif DepVar == 3:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='diversity')
elif DepVar == 4:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund_16S')
count_rDF.fillna(0, inplace=True)
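                # count_rDF is now a samples x features matrix: one row per sampleid, one column
                # per rank_id (taxon or KEGG id), holding the chosen dependent variable, with
                # missing sample/feature combinations filled in as zero.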
functions.setBase(RID, 'Step 3 of 5: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...')
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
r("list.of.packages <- c('fpc', 'vegan', 'ggplot2')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...')
r("options(width=5000)")
print r('library(fpc)')
print r('library(ggplot2)')
print r('library(vegan)')
print r('source("R/myFunctions/myFunctions.R")')
count_rDF.sort_index(axis=0, inplace=True)
r.assign("data", count_rDF)
r.assign("cols", count_rDF.columns.values.tolist())
r("colnames(data) <- unlist(cols)")
metaDF.sort_values('sampleid', inplace=True)
r.assign("meta", metaDF)
r.assign("rows", metaDF.index.values.tolist())
r("rownames(meta) <- unlist(rows)")
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
r.assign("PC1", PC1)
r.assign("PC2", PC2)
# Can only constrain if meta-variables have been selected
constrain2 = 'no'
if constrain == 'yes':
if catFields or quantFields:
constrain2 = 'yes'
else:
constrain2 = 'no'
if not method == 'decorana':
if constrain2 == 'no':
if scale == 'yes':
pca_string = 'res.pca <- ' + method + '(data, scale=TRUE)'
r.assign("cmd", pca_string)
r("eval(parse(text=cmd))")
else:
pca_string = 'res.pca <- ' + method + '(data, scale=FALSE)'
r.assign("cmd", pca_string)
r("eval(parse(text=cmd))")
if constrain2 == 'yes':
if scale == 'yes':
pca_string = 'res.pca <- ' + method + '(data ~ ., data=meta, scale=TRUE)'
r.assign("cmd", pca_string)
r("eval(parse(text=cmd))")
else:
pca_string = 'res.pca <- ' + method + '(data ~ ., data=meta, scale=FALSE)'
r.assign("cmd", pca_string)
r("eval(parse(text=cmd))")
if method == 'decorana':
pca_string = 'res.pca <- ' + method + '(data)'
r.assign("cmd", pca_string)
r("eval(parse(text=cmd))")
res = r.get('res.pca')
if res is None:
error = "Your analysis failed due to infinite or missing values.\nPlease try tranforming your data and/or selecting different samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result += str(r('print(res.pca)')) + '\n'
result += '===============================================\n'
addContrib1 = all['addContrib1']
contribVal1 = float(all['contribVal1'])
addContrib2 = all['addContrib2']
contribVal2 = float(all['contribVal2'])
                # Use vegan's envfit to relate the ordination axes to the taxa abundances (ef1) and to the selected quantitative meta-variables (ef2)
if addContrib1 == 'yes':
r("ef1 <- envfit(res.pca, data, add=False)")
if quantFields and addContrib2 == 'yes':
r.assign('quantFields', quantFields)
                    r('ef2 <- envfit(res.pca, meta[,paste(quantFields)], add=FALSE)')
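                # envfit fits each variable as a vector onto the ordination, returning the fitted
                # directions (arrows), a squared correlation (r) as goodness of fit, and a
                # permutation p-value; these are used below to scale, draw, and filter the arrows.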
# get scores from vegan
r('sites <- scores(res.pca, display="sites", choices=c(PC1,PC2))')
r('species <- scores(res.pca, display="species", choices=c(PC1,PC2))')
ellipseVal = all['ellipseVal']
if ellipseVal == 'None':
r("ellipseTrt <- c('All')")
if ellipseVal == 'interaction':
r.assign("catFields", catFields)
r("ellipseTrt <- interaction(meta[,paste(catFields)])")
if ellipseVal != 'None' and ellipseVal != 'k-means' and ellipseVal != 'interaction':
r.assign("ellipseVal", ellipseVal)
r("ellipseTrt <- as.factor(meta[,paste(ellipseVal)])")
if ellipseVal != 'None' and ellipseVal == 'k-means':
r("pamk.best <- pamk(sites)")
r("km <- kmeans(sites, centers=pamk.best$nc)")
r("ellipseTrt <- as.factor(paste('k-cluster: ', km$cluster, sep=''))")
r("if (!exists('ellipseTrt')) {ellipseTrt <- c('All')}")
colorVal = all['colorVal']
if colorVal == 'None':
r("colorTrt <- c('All')")
if colorVal == 'interaction':
r.assign("catFields", catFields)
r("colorTrt <- interaction(meta[,paste(catFields)])")
if colorVal != 'None' and colorVal != 'k-means' and colorVal != 'interaction':
r.assign("colorVal", colorVal)
r("colorTrt <- as.factor(meta[,paste(colorVal)])")
if colorVal != 'None' and colorVal == 'k-means':
r("pamk.best <- pamk(sites)")
r("km <- kmeans(sites, centers=pamk.best$nc)")
r("colorTrt <- as.factor(paste('k-cluster: ', km$cluster, sep=''))")
r("if (!exists('colorTrt')) {colorTrt <- c('All')}")
shapeVal = all['shapeVal']
if shapeVal == 'None':
r("shapeTrt <- 'All'")
if shapeVal == 'interaction':
r.assign("catFields", catFields)
r("shapeTrt <- interaction(meta[,paste(catFields)])")
if shapeVal != 'None' and shapeVal != 'k-means' and shapeVal != 'interaction':
r.assign("shapeVal", shapeVal)
r("shapeTrt <- as.factor(meta[,paste(shapeVal)])")
if shapeVal != 'None' and shapeVal == 'k-means':
r("pamk.best <- pamk(sites)")
r("km <- kmeans(sites, centers=pamk.best$nc)")
r("shapeTrt <- as.factor(paste('k-cluster: ', km$cluster, sep=''))")
r("if (!exists('shapeTrt')) {shapeTrt <- c('All')}")
r("indDF <- data.frame( \
x=sites[,PC1], \
y=sites[,PC2], \
Color=colorTrt, \
Shape=shapeTrt, \
Fill=ellipseTrt) \
")
gridVal_X = all['gridVal_X']
if gridVal_X != 'None':
r.assign("gridVal_X", gridVal_X)
r("indDF$myGrid_X <- meta[,paste(gridVal_X)]")
gridVal_Y = all['gridVal_Y']
if gridVal_Y != 'None':
r.assign("gridVal_Y", gridVal_Y)
r("indDF$myGrid_Y <- meta[,paste(gridVal_Y)]")
r("varDF <- data.frame( \
x=species[,PC1], \
y=species[,PC2]) \
")
# get taxa rank names
rankNameDF = finalDF.drop_duplicates(subset='rank_id', keep='last')
rankNameDF.set_index('rank_id', inplace=True)
rankNameDF['rank_name'] = rankNameDF['rank_name'].str.split('|').str[-1]
r.assign('rankNameDF', rankNameDF['rank_name'])
r('varDF <- merge(varDF, rankNameDF, by="row.names", all.x=TRUE)')
                # rescale: scale the variable (species) scores so their spread matches the spread of the site scores on both axes
                r("mult <- min((max(indDF$x)-min(indDF$x))/(max(varDF$x)-min(varDF$x)), (max(indDF$y)-min(indDF$y))/(max(varDF$y)-min(varDF$y)))")
# Create biplot using ggplot
r("p <- ggplot(indDF, aes(x,y))")
if gridVal_X != 'None' and gridVal_Y == 'None':
r("p <- p + facet_grid(. ~ myGrid_X)")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
elif gridVal_X == 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(myGrid_Y ~ .)")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
elif gridVal_X != 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(myGrid_Y ~ myGrid_X)")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
myPalette = all['palette']
r.assign("myPalette", myPalette)
r('number <- nlevels(indDF$Shape)')
r('shapes <- rep(c(21, 22, 23, 24, 25), length.out = number) ')
if not colorVal == 'None':
if not shapeVal == 'None':
r("p <- p + geom_point(aes(fill=factor(Color), shape=factor(Shape)), size=4)")
r("p <- p + scale_fill_brewer(name='Symbol-colors', palette=myPalette, guide=guide_legend(override.aes=list(shape=21)))")
r("p <- p + scale_shape_manual(name='Symbol-shapes', values=shapes)")
else:
r("p <- p + geom_point(aes(fill=factor(Color)), shape=21, size=4)")
r("p <- p + scale_fill_brewer(name='Symbol-colors', palette=myPalette, guide=guide_legend(override.aes=list(shape=21)))")
else:
if not shapeVal == 'None':
r("p <- p + geom_point(aes(shape=factor(Shape)), size=4)")
r("p <- p + scale_shape_manual(name='Symbol-shapes', values=shapes)")
else:
r("p <- p + geom_point(color='gray', size=4)")
if not ellipseVal == 'None':
myCI = float(all["CI"])
r.assign("myCI", myCI)
r("p <- p + stat_ellipse(aes(color=factor(Fill)), geom='polygon', level=myCI, alpha=0)")
r("p <- p + scale_color_brewer(palette=myPalette)")
r("p <- p + guides(color=guide_legend('Ellipse-colors'))")
r("p <- p + geom_hline(aes(yintercept=0), linetype='dashed')")
r("p <- p + geom_vline(aes(xintercept=0), linetype='dashed')")
if addContrib1 == 'yes':
r('efDF <- as.data.frame(ef1$vectors$arrows*ef1$vectors$r)')
r('efDF$p <- ef1$vectors$pvals')
r('pvals.adj <- round(p.adjust(efDF$p, method="BH"),3)')
r('efDF$p.adj <- pvals.adj')
r('efDF <- efDF[ order(row.names(efDF)), ]')
r('row.names(varDF) <- varDF$Row.names')
r('varDF <- varDF[ order(row.names(varDF)), ]')
r('efDF$label <- varDF$rank_name')
# scale and remove non-significant objects
r.assign("contribVal1", contribVal1)
r('efDF.adj <- efDF[efDF$p <= paste(contribVal1),]')
r('efDF.adj$v1 <- efDF.adj[,PC1] * mult * 0.7')
r('efDF.adj$v2 <- efDF.adj[,PC2] * mult * 0.7')
efDF_adj = r.get("efDF.adj")
# send data to result string
envfit = r("efDF")
result += 'Envfit results for species scores\n'
result += str(envfit) + '\n'
result += '===============================================\n'
if not efDF_adj.empty:
r("p <- p + geom_segment(data=efDF.adj, aes(x=0, y=0, xend=v1, yend=v2), arrow=arrow(length=unit(0.2,'cm')), alpha=0.75, color='blue')")
r("p <- p + geom_text(data=efDF.adj, aes(x=v1, y=v2, label=label, vjust=ifelse(v2 >= 0, -1, 2)), size=3, color='blue')")
if quantFields and addContrib2 == 'yes':
r('efDF <- data.frame(ef2$vectors$arrows*sqrt(ef2$vectors$r))')
r('efDF$p <- ef2$vectors$pvals')
r('pvals.adj <- round(p.adjust(efDF$p, method="BH"),3)')
r('efDF$p.adj <- pvals.adj')
r('efDF$label <- unlist(quantFields)')
# scale and remove non-significant objects
r.assign("contribVal2", contribVal2)
r('efDF.adj <- efDF[efDF$p < paste(contribVal2),]')
r('efDF.adj$v1 <- efDF.adj[,PC1] * mult * 0.7')
r('efDF.adj$v2 <- efDF.adj[,PC2] * mult * 0.7')
efDF_adj = r.get('efDF.adj')
# send data to result string
envfit = r("efDF")
result += 'EnvFit for selected quantitative variables\n'
result += str(envfit) + '\n'
result += '===============================================\n'
if not efDF_adj.empty:
r("p <- p + geom_segment(data=efDF.adj, aes(x=0, y=0, xend=v1, yend=v2), arrow=arrow(length=unit(0.2,'cm')), alpha=0.75, color='red')")
r("p <- p + geom_text(data=efDF.adj, aes(x=v1, y=v2, label=label, vjust=ifelse(v2 >= 0, -1, 2)), size=3, color='red')")
# add labels to plot
r("p <- p + ggtitle('Biplot of variables and individuals')")
if method != 'decorana':
r("eig <- eigenvals(res.pca)")
else:
r("eig <- res.pca$evals")
r("perExp <- eig / sum(eig) * 100")
r("p <- p + xlab(paste('Axis', PC1, ' (', round(perExp[[PC1]], 1), '%)', sep=''))")
r("p <- p + ylab(paste('Axis', PC2, ' (', round(perExp[[PC2]], 1), '%)', sep=''))")
path = "myPhyloDB/media/temp/pca/Rplots"
if not os.path.exists(path):
os.makedirs(path)
r.assign("path", path)
r.assign("RID", RID)
r("file <- paste(path, '/', RID, '.pca.pdf', sep='')")
r("p <- set_panel_size(p, height=unit(2.9, 'in'), width=unit(2.9, 'in'))")
r("nlev <- nlevels(as.factor(indDF$myGrid_X))")
r('if (nlev == 0) { \
myWidth <- 8 \
} else { \
myWidth <- 3*nlev+4 \
}')
r("nlev <- nlevels(as.factor(indDF$myGrid_Y))")
r('if (nlev == 0) { \
myHeight <- 8 \
} else { \
myHeight <- 3*nlev+4 \
}')
r("ggsave(filename=file, plot=p, units='in', height=myHeight, width=myWidth, limitsize=F)")
functions.setBase(RID, 'Step 4 of 5: Performing statistical test...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 5 of 5: Formatting graph data...')
finalDict = {}
r("options(width=5000)")
finalDict['text'] = result
## variables
nameDF = finalDF[['rank_id']].drop_duplicates(subset='rank_id', keep='last')
nameDF.set_index('rank_id', inplace=True)
r("df <- data.frame(species)")
tempDF = r.get("df")
IDs = r.get("row.names(df)")
tempDF['id'] = IDs
tempDF.set_index('id', inplace=True)
varCoordDF = pd.merge(nameDF, tempDF, left_index=True, right_index=True, how='inner')
varCoordDF.reset_index(drop=False, inplace=True)
varCoordDF.rename(columns={'index': 'rank_id'}, inplace=True)
if treeType == 1:
idList = functions.getFullTaxonomy(list(varCoordDF.rank_id.unique()))
varCoordDF['Taxonomy'] = varCoordDF['rank_id'].map(idList)
elif treeType == 2:
idList = functions.getFullKO(list(varCoordDF.rank_id.unique()))
varCoordDF['Taxonomy'] = varCoordDF['rank_id'].map(idList)
elif treeType == 3:
idList = functions.getFullNZ(list(varCoordDF.rank_id.unique()))
varCoordDF['Taxonomy'] = varCoordDF['rank_id'].map(idList)
varCoordDF.replace(to_replace='N/A', value=np.nan, inplace=True)
varCoordDF.dropna(axis=1, how='all', inplace=True)
table = varCoordDF.to_html(classes="table display")
table = table.replace('border="1"', 'border="0"')
finalDict['varCoordDF'] = str(table)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if ellipseVal == 'k-means' or colorVal == 'k-means' or shapeVal == 'k-means':
r("df <- data.frame(km$cluster, sites)")
else:
r("df <- data.frame(sites)")
tempDF = r.get("df")
if not metaDF.empty:
tempDF['id'] = metaDF.index.values.tolist()
tempDF.set_index('id', inplace=True)
indCoordDF = pd.merge(metaDF, tempDF, left_index=True, right_index=True, how='inner')
indCoordDF.reset_index(drop=False, inplace=True)
indCoordDF.rename(columns={'index': 'rank_id', ' km.cluster ': 'k-means cluster'}, inplace=True)
else:
indCoordDF = tempDF.copy()
indCoordDF.rename(columns={' km.cluster ': 'k-means cluster'}, inplace=True)
table = indCoordDF.to_html(classes="table display")
table = table.replace('border="1"', 'border="0"')
finalDict['indCoordDF'] = str(table)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| gpl-3.0 |
tdgoodrich/mase | python101/code/zipf.py | 14 | 1453 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import sys
import string
import matplotlib.pyplot as pyplot
from analyze_book import *
def rank_freq(hist):
"""Returns a list of tuples where each tuple is a rank
and the number of times the item with that rank appeared.
"""
# sort the list of frequencies in decreasing order
freqs = hist.values()
freqs.sort(reverse=True)
# enumerate the ranks and frequencies
rf = [(r+1, f) for r, f in enumerate(freqs)]
return rf
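# Example with a hypothetical histogram: rank_freq({'the': 12, 'of': 7, 'cat': 1})
# returns [(1, 12), (2, 7), (3, 1)] -- ranks start at 1, frequencies are in descending order.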
def print_ranks(hist):
"""Prints the rank vs. frequency data."""
for r, f in rank_freq(hist):
print r, f
def plot_ranks(hist, scale='log'):
"""Plots frequency vs. rank."""
t = rank_freq(hist)
rs, fs = zip(*t)
pyplot.clf()
pyplot.xscale(scale)
pyplot.yscale(scale)
pyplot.title('Zipf plot')
pyplot.xlabel('rank')
pyplot.ylabel('frequency')
pyplot.plot(rs, fs, 'r-')
pyplot.show()
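# Zipf's law predicts that frequency falls off roughly as a power of rank, f(r) ~ C / r**s with
# s close to 1, so on the log-log axes used above a Zipfian text shows up as an approximately
# straight line with slope of about -s.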
def main(name, filename='emma.txt', flag='plot', *args):
hist = process_file(filename, skip_header=True)
# either print the results or plot them
if flag == 'print':
print_ranks(hist)
elif flag == 'plot':
plot_ranks(hist)
else:
print 'Usage: zipf.py filename [print|plot]'
if __name__ == '__main__':
main(*sys.argv)
| unlicense |