repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Vimos/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 11 | 7453 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approaches to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we fix alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irrelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    projector = np.dot(np.dot(X_irrelevant.T, X_relevant),
                       linalg.pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
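# Interpretation note (added commentary): roughly speaking, values of this
# quantity well below 1 correspond to designs satisfying the incoherence /
# irrepresentability condition under which L1-based recovery of the true
# support is known to work [Wainwright2006]; large values indicate strongly
# correlated relevant/irrelevant features, the hard regime explored below.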
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocks of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log and makes it
# easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha.
# Silence the user warning output: it is not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
idlead/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is therefore better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, etc.
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
bthirion/scikit-learn | sklearn/utils/multiclass.py | 2 | 14743 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
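# Illustrative usage sketch (hypothetical estimator, not part of this module):
#
#     def partial_fit(self, X, y, classes=None):
#         if _check_partial_fit_first_call(self, classes):
#             # First call: self.classes_ has just been set from `classes`,
#             # so any class-dependent state can be initialised here.
#             ...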
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
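# Minimal usage sketch (dense case; the values are written out by hand, not
# computed here):
#
#     >>> y = np.array([[1, 0], [2, 0], [1, 1]])
#     >>> classes, n_classes, class_prior = class_distribution(y)
#     # classes     -> [array([1, 2]), array([0, 1])]
#     # n_classes   -> [2, 2]
#     # class_prior -> [array([2/3, 1/3]), array([2/3, 1/3])]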
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
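# Added note: this helper is how one-vs-one style classifiers aggregate their
# n_classes * (n_classes - 1) / 2 pairwise predictions into per-class scores.
# The votes determine the ranking; because the rescaled confidence sums stay
# strictly within (-0.5, 0.5), they can only break ties between classes with
# an equal number of votes, never overturn a one-vote difference.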
| bsd-3-clause |
deepesch/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
freedomtan/tensorflow | tensorflow/python/autograph/core/config.py | 11 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import config_lib
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
CONVERSION_RULES = (
# Known packages
Convert('tensorflow.python.training.experimental'),
# Builtin modules
DoNotConvert('collections'),
DoNotConvert('copy'),
DoNotConvert('cProfile'),
DoNotConvert('inspect'),
DoNotConvert('ipdb'),
DoNotConvert('linecache'),
DoNotConvert('mock'),
DoNotConvert('pathlib'),
DoNotConvert('pdb'),
DoNotConvert('posixpath'),
DoNotConvert('pstats'),
DoNotConvert('re'),
DoNotConvert('threading'),
DoNotConvert('urllib'),
# Known libraries
DoNotConvert('matplotlib'),
DoNotConvert('numpy'),
DoNotConvert('pandas'),
DoNotConvert('tensorflow'),
DoNotConvert('PIL'),
# TODO(b/133417201): Remove.
DoNotConvert('tensorflow_probability'),
# TODO(b/133842282): Remove.
DoNotConvert('tensorflow_datasets.core'),
)
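# Added sketch (not part of this module): instead of editing this global list,
# user code can usually opt a single function out of conversion with the
# public decorator, e.g.
#
#   @tf.autograph.experimental.do_not_convert
#   def my_handwritten_graph_fn(x):
#       ...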
| apache-2.0 |
lcharleux/numerical_analysis | doc/Optimisation/Example_code/regression.py | 2 | 2015 | #------------------------------------------------------------------------
# Regression ou "fit"
#------------------------------------------------------------------------
# PACKAGES
from scipy import optimize as opt # Optimize
import numpy as np # Numpy
import matplotlib.pyplot as plt # Pyplot
from matplotlib import cm # Colormaps
# INITIAL CONDITIONS
# Note: it is strongly recommended to play with these parameters to see their effect.
tau0 = 1. # Initial damping time constant
omega0 = 1. # Initial angular frequency
# HELPER FUNCTIONS
def fonction(tau, omega, x):
'''
A damped sine: sin(omega * x) * exp(-x / tau)
'''
return np.sin(omega * x ) * np.exp(-x/tau)
def erreur(params):
'''
Wraps the cost function (sum of squared errors) for use with fmin
'''
e = 0.
for i in xrange(len(x)):
e += (fonction(params[0], params[1], x[i]) - y[i])**2
return e
# SOLVING
Np = 100
tau_sol = 5.
omega_sol = 4.
bruit = 1.
x = np.linspace(0., 10., Np)
y_perfect = fonction(tau_sol, omega_sol, x)
y = y_perfect + bruit * (np.random.rand(Np) -.5)
# Intuitive (grid evaluation) method
t = np.linspace(1., 5., 100)
Tau, Omega = np.meshgrid(t,t)
Err = erreur((Tau, Omega))
# Simplex (Nelder-Mead) method
params0 = np.array([tau0, omega0])
sol, steps = opt.fmin(erreur, params0, retall = True)
steps = np.array(steps).transpose()
tau = steps[0]
omega = steps[1]
tau_f = sol[0]
omega_f = sol[1]
y_f = fonction(tau_f, omega_f, x)
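# Added note (alternative sketch, not used here): the same least-squares fit
# could be obtained with scipy's curve_fit, e.g.
#   popt, pcov = opt.curve_fit(lambda xd, tau, omega: fonction(tau, omega, xd),
#                              x, y, p0=[tau0, omega0])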
# PLOTTING
N = 20
fig = plt.figure()
plt.clf()
fig.add_subplot(121)
plt.plot(x,y_perfect, 'kd', linewidth = 2., label = 'Donnees')
plt.plot(x,y, 'or', label = 'Donnees + Bruit')
for i in xrange(len(tau)):
tau_i = tau[i]
omega_i = omega[i]
y_i = fonction(tau_i, omega_i, x)
plt.plot(x,y_i, 'g-', linewidth = 1.)
plt.plot(x,y_f, 'b-', linewidth = 1., label = 'Solution')
plt.legend()
fig.add_subplot(122)
plt.title('Erreur')
plt.contourf(Tau, Omega, Err, N)
plt.colorbar()
plt.contour(Tau, Omega, Err, N, colors = 'black')
plt.grid()
plt.plot(tau, omega, 'go-', linewidth = 2.)
plt.show()
| gpl-2.0 |
jamesblunt/kaggle-galaxies | predict_augmented_npy_maxout2048_pysex.py | 7 | 9584 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
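# Added note: 3 zoom levels x 10 rotation angles x 2 (plain + flipped)
# = 60 augmentation transforms per image.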
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print "  RMSE (last iteration):\t%.6f" % rmse_valid
print "  RMSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
ibab/tensorflow | tensorflow/examples/skflow/multiple_gpu.py | 9 | 1658 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
import tensorflow as tf
from tensorflow.contrib import learn
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
def my_model(X, y):
"""
This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
"""
with tf.device('/gpu:1'):
layers = learn.ops.dnn(X, [10, 20, 10], dropout=0.5)
with tf.device('/gpu:2'):
return learn.models.logistic_regression(layers, y)
classifier = learn.TensorFlowEstimator(model_fn=my_model, n_classes=3)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
threecgreen/SpotiGraph | stats/views.py | 1 | 3359 | """
Contains the view functions and their helper functions for selecting and exploring data on a user's playlists.
View functions:
selection
basic_stats
Helper functions:
get_spotify_session
extract_selection_data
"""
import pandas as pd
import spotipy as spy
from collections import namedtuple
from typing import List
from django.http import HttpResponse, HttpRequest
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# namedtuple for storing playlist data for easy access within the django template
Playlist = namedtuple("Playlist", ["id", "name", "owner", "songs"])
@login_required
def selection(request: HttpRequest) -> HttpResponse:
"""
View function for a page where users select a playlist(s) to analyze.
Args:
request: Page request.
Returns:
Page containing a list of the user's playlists.
"""
spotify = get_spotify_session(request)
# TODO handle for more than 50 playlists with pagination
playlist_data = spotify.current_user_playlists()["items"]
user_playlists = extract_selection_data(playlist_data)
# TODO handle users with no playlists
assert len(user_playlists) > 0
context = {
"user": request.user,
"user_playlists": user_playlists,
"page_name": "Playlist Selection",
}
return render(request, "selection.html", context)
def get_spotify_session(request: HttpRequest) -> spy.Spotify:
"""
Gets a spotipy module spotify session for more idiomatically interacting with the Spotify API.
Args:
request: HTTP request from a logged-in user.
Returns:
Spotify object for easier interaction with the Spotify web API.
"""
social = request.user.social_auth.get(provider="spotify")
token = social.extra_data["access_token"]
return spy.Spotify(auth=token)
def extract_selection_data(playlists: List[dict]) -> List[Playlist]:
"""
Takes the complex list of nested dictionaries and extracts the needed information for use in the selection page.
Extracts:
playlist ID
playlist name
playlist owner
number of songs in the playlist
Args:
playlists: "items" list of dicts from the original playlist data received from the Spotify API.
Returns:
List of namedtuples of type Playlist containing required information for the selection page ordered
alphabetically by playlist name.
"""
new_playlists = []
for playlist in playlists:
playlist_tup = Playlist(id=playlist["id"],
name=playlist["name"],
owner=playlist["owner"]["id"],
songs=playlist["tracks"]["total"])
new_playlists.append(playlist_tup)
# Sort alphabetically by name
return sorted(new_playlists, key=lambda tup: tup.name)
@login_required
def basic_stats(request: HttpRequest, username: str, playlist_id: str) -> HttpResponse:
spotify = get_spotify_session(request)
tracks_data = spotify.user_playlist_tracks(username, playlist_id, limit=1)
context = {
"user": request.user,
"tracks": tracks_data,
}
return render(request, "basic_stats.html", context)
def get_playlist_data(username: str, playlist_id: str):
pass
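    # Not implemented yet. A minimal sketch, assuming a spotipy session is
    # obtained the same way as in the views above (hypothetical):
    #     spotify = get_spotify_session(request)
    #     return spotify.user_playlist_tracks(username, playlist_id)["items"]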
| mit |
kayarre/dicomwrangle | taginvestigation.py | 1 | 3010 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 13:45:03 2015
@author: sansomk
"""
import dicom
import os
#import numpy as np
#import matplotlib.pyplot as plt
#from matplotlib.widgets import Slider, Button, RadioButtons
import fnmatch
#dcmpath='/Users/sansomk/Downloads/E431791260_FlowVol_01/' # mac
dcmpath = "/home/sansomk/caseFiles/mri/images/E431791260_FlowVol_01/mag"
dcm_files = []
count = 0
dict_test = {}
# tags
# TriggerTime = time of the image
# SliceLocation = spatial location of slice.
# SliceThickness = the thickness of the image
# need to figure our how to convert images to axial ones
slice_location = []
trigger_time = []
image_dict = {}
count = 0
fn_dict = {"X":"FlowX_*.dcm", "Y":"Flowy_*.dcm", "Z":"FlowZ_*.dcm", "MAG":"Mag_*.dcm"}
new_dict = {}
for dirname, subdirlist, filelist in os.walk(dcmpath):
for filen in filelist:
try:
filePath = os.path.join(dirname,filen)
#print(filePath)
f = dicom.read_file(filePath, stop_before_pixels=True)
#print(dirname, subdirlist, filelist)
#print(filePath)
#print(f.SliceLocation)
except:
print("error: {0}".format(filen))
continue
#dictionary of images
if (f.TriggerTime not in image_dict.keys()):
image_dict[f.TriggerTime] = {}
if (f.SliceLocation not in image_dict[f.TriggerTime].keys()):
image_dict[f.TriggerTime][f.SliceLocation] = {}
for fn_key in fn_dict.keys():
if( fn_key not in image_dict[f.TriggerTime][f.SliceLocation].keys()):
image_dict[f.TriggerTime][f.SliceLocation][fn_key] = {}
#print(fn_key, filen, fn_dict[fn_key])
if (fnmatch.fnmatch(filen, fn_dict[fn_key])):
#print('did i get here')
if (f.SOPInstanceUID not in image_dict[f.TriggerTime][f.SliceLocation][fn_key].keys()):
image_dict[f.TriggerTime][f.SliceLocation][fn_key][f.SOPInstanceUID] = [filePath]
#print(image_dict[fn_key])
if (f.TriggerTime not in trigger_time):
trigger_time.append(f.TriggerTime)
if (f.SliceLocation not in slice_location):
slice_location.append(f.SliceLocation)
#print(slice_location, trigger_time)
print(sorted(image_dict[image_dict.keys()[0]].keys()))
"""
for time in sorted(trigger_time):
new_dict[time] = {}
for loc in sorted(slice_location):
new_dict[time][loc] = {}
for var in fn_dict.keys():
new_dict[time][loc][var] = {}
for image in image_dict.keys():
new_dict[time][loc][var][image] = {}
if (image_dict[image][0] == var and
image_dict[image][1] == time and
image_dict[image][2] == loc):
new_dict[time][loc][var][image] = image_dict[image]
print(new_dict)
for key in dict_test.keys():
print(key, dict_test[key])
print(count)
""" | bsd-2-clause |
ephes/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
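# Added note: because X is sorted first, the probability of a positive label
# increases monotonically along the sequence, so the generated boolean labels
# are a natural (noisy) input for isotonic regression.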
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
rahul003/mxnet | example/autoencoder/mnist_sae.py | 18 | 4630 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import argparse
import logging
import mxnet as mx
import numpy as np
import data
from autoencoder import AutoEncoderModel
parser = argparse.ArgumentParser(description='Train an auto-encoder model for mnist dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--print-every', type=int, default=1000,
help='interval of printing during training.')
parser.add_argument('--batch-size', type=int, default=256,
help='batch size used for training.')
parser.add_argument('--pretrain-num-iter', type=int, default=50000,
help='number of iterations for pretraining.')
parser.add_argument('--finetune-num-iter', type=int, default=100000,
help='number of iterations for fine-tuning.')
parser.add_argument('--visualize', action='store_true',
help='whether to visualize the original image and the reconstructed one.')
parser.add_argument('--num-units', type=str, default="784,500,500,2000,10",
help='number of hidden units for the layers of the encoder.'
'The decoder layers are created in the reverse order. First dimension '
'must be 784 (28x28) to match mnist image dimension.')
parser.add_argument('--gpu', action='store_true',
help='whether to start training on GPU.')
# set to INFO to see less information during training
logging.basicConfig(level=logging.INFO)
opt = parser.parse_args()
logging.info(opt)
print_every = opt.print_every
batch_size = opt.batch_size
pretrain_num_iter = opt.pretrain_num_iter
finetune_num_iter = opt.finetune_num_iter
visualize = opt.visualize
gpu = opt.gpu
layers = [int(i) for i in opt.num_units.split(',')]
if __name__ == '__main__':
xpu = mx.gpu() if gpu else mx.cpu()
print("Training on {}".format("GPU" if gpu else "CPU"))
ae_model = AutoEncoderModel(xpu, layers, pt_dropout=0.2, internal_act='relu',
output_act='relu')
X, _ = data.get_mnist()
train_X = X[:60000]
val_X = X[60000:]
ae_model.layerwise_pretrain(train_X, batch_size, pretrain_num_iter, 'sgd', l_rate=0.1,
decay=0.0, lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1),
print_every=print_every)
ae_model.finetune(train_X, batch_size, finetune_num_iter, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1), print_every=print_every)
ae_model.save('mnist_pt.arg')
ae_model.load('mnist_pt.arg')
print("Training error:", ae_model.eval(train_X))
print("Validation error:", ae_model.eval(val_X))
if visualize:
try:
from matplotlib import pyplot as plt
from model import extract_feature
# sample a random image
original_image = X[np.random.choice(X.shape[0]), :].reshape(1, 784)
data_iter = mx.io.NDArrayIter({'data': original_image}, batch_size=1, shuffle=False,
last_batch_handle='pad')
# reconstruct the image
reconstructed_image = extract_feature(ae_model.decoder, ae_model.args,
ae_model.auxs, data_iter, 1,
ae_model.xpu).values()[0]
print("original image")
plt.imshow(original_image.reshape((28, 28)))
plt.show()
print("reconstructed image")
plt.imshow(reconstructed_image.reshape((28, 28)))
plt.show()
except ImportError:
logging.info("matplotlib is required for visualization")
| apache-2.0 |
Pragmatismo/Pigrow | scripts/gui/graph_modules/graph_picture_bar.py | 1 | 11897 |
def read_graph_options():
'''
Returns a dictionary of settings and their default values for use by the remote gui
'''
graph_module_settings_dict = {
"use_val_set":"day average", # day min, day max, day range, day average, last
"title_extra":"",
"pic_to_use":"flower.png",
"middle_method":"repeat", # stretch, repeat
"top_cut_location":"175",
"lower_cut_location":"335",
"label_above":"true",
"show_background":"true",
"show_axis":"true",
"rotate_label":"false"
}
return graph_module_settings_dict
def make_graph(data_sets, graph_path, ymax="", ymin="", size_h="", size_v="", dh="", th="", tc="", dc="", extra={}):
print("Making a pretty picture graph...")
#import the tools we'll be using
import os
import sys
import datetime
import matplotlib
matplotlib.use('agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import AxesImage
from matplotlib.transforms import Bbox, TransformedBbox, BboxTransformTo
# settings
if extra == {}:
extra = read_graph_options()
use_val_set = extra['use_val_set'].lower()
title_extra = extra['title_extra'].lower()
pic_to_use = extra['pic_to_use'].lower()
middle_method = extra['middle_method'].lower()
top_cut_location = int(extra['top_cut_location'])
lower_cut_location = int(extra['lower_cut_location'])
label_above = extra['label_above'].lower()
show_background = extra['show_background'].lower()
show_axis = extra['show_axis'].lower()
rotate_label = extra['rotate_label'].lower()
#middle_method = "stretch"
#middle_method = "repeat"
#
# #
# # # sort the data into the bits we need
# #
#
    # make a dictionary containing every day's lists of dates and values
def make_dict_of_sets(date_list, value_list, key_list):
dictionary_of_sets = {}
for log_item_pos in range(0, len(date_list)):
day_group = date_list[log_item_pos].strftime("%Y:%m:%d")
log_time = date_list[log_item_pos]
if day_group in dictionary_of_sets:
# If there's already an entry for this day
# Read existing lists of dates and values
values_to_graph = dictionary_of_sets[day_group][1]
dates_to_graph = dictionary_of_sets[day_group][0]
# add current value and date to lists
values_to_graph.append(value_list[log_item_pos])
dates_to_graph.append(log_time)
else:
# if there's no entry for this day yet
# create new date and value lists if the day_group doesn't exist yet
values_to_graph = [value_list[log_item_pos]]
dates_to_graph = [log_time]
# put the lists of values and dates into the dictionary of sets under the daygroup key
dictionary_of_sets[day_group]=[dates_to_graph, values_to_graph]
return dictionary_of_sets
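# Illustrative shape of the dictionary returned above (dates and readings are made up):
#   {"2020:05:01": [[datetime(2020, 5, 1, 9, 0), datetime(2020, 5, 1, 9, 10)], [21.5, 22.0]],
#    "2020:05:02": [[datetime(2020, 5, 2, 9, 0)], [20.8]]}
# i.e. one entry per day, holding that day's list of timestamps and list of values.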
#
# loop through each day's set finding min-max values and adding them to lists.
def make_min_max_vals_lists(dictionary_of_days):
day_names = []
day_min_val = []
day_max_val = []
day_range = []
day_averages= []
counted = []
counter = 1
for key, value in dictionary_of_days.items():
days_date_list = value[0]
days_value_list = value[1]
#find min max values
val_range=0
min_val = days_value_list[0]
max_val = days_value_list[0]
total_of_vals = 0
for x in range(1, len(days_value_list)):
current_value = days_value_list[x]
if current_value > max_val:
max_val = current_value
if current_value < min_val:
min_val = current_value
total_of_vals = total_of_vals + current_value
val_range = max_val - min_val
day_average = total_of_vals / len(days_value_list)
# add to lists
counted.append(counter)
counter += 1
day_names.append(key)
day_min_val.append(min_val)
day_max_val.append(max_val)
day_range.append(round(val_range, 2))
day_averages.append(round(day_average, 2))
if use_val_set == "day min":
values = day_min_val
if use_val_set == "day max":
values = day_max_val
if use_val_set == "day range":
values = day_range
if use_val_set == "day average":
values = day_averages
return counted, values, day_names
#
class PrettyBarImage(AxesImage):
# This loads the image and stretches it neatly
zorder = 1
def __init__(self, ax, bbox, *, extent=(0, 1, 0, 1), **kwargs):
super().__init__(ax, extent=extent, **kwargs)
self._bbox = bbox
self.set_transform(BboxTransformTo(bbox))
def draw(self, renderer, *args, **kwargs):
# load image
pic_to_use_path = os.path.join(os.getcwd(), "graph_modules", pic_to_use)
original_image = plt.imread(pic_to_use_path)
#top_cut_location = original_image.shape[0] - 130 #600
#lower_cut_location = original_image.shape[0] - 100 #600
# determine amount it needs stretching by checking to see how deformed the bounding box is
stretch_factor = round(self._bbox.height / self._bbox.width, 0)
ny = int(stretch_factor * original_image.shape[1])
#print(" --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---")
#print("sizes -- original image; ", original_image.shape[0], original_image.shape[1], " bbox; ", self._bbox.height, self._bbox.width)
#print("stretch factor; ", stretch_factor, "unrounded", self._bbox.height / self._bbox.width, " approximation of new height ", ny )
#
# try to find correct amount to repeat
#
# determine size of removed portion
size_of_top_portion = top_cut_location
size_of_lower_portion = original_image.shape[0] - lower_cut_location
portion_of_image_removed = size_of_top_portion + size_of_lower_portion
portion_remains = original_image.shape[0] - portion_of_image_removed
#print("portion sizes; ", size_of_top_portion, size_of_lower_portion, " size of removed ", portion_of_image_removed, " size of remaining ", portion_remains, " added together ", portion_of_image_removed + portion_remains)
new_h_approx = self._bbox.height * stretch_factor
new_h_minus_removed = new_h_approx - portion_of_image_removed
#print("new size of stretch secton", new_h_minus_removed, "old siize of stretch section", portion_remains)
ratio_of_new_to_old = round(new_h_minus_removed / portion_remains, 0)
#print("Ratio new h to old h", ratio_of_new_to_old)
#
ratio_of_image_to_removed = portion_remains / original_image.shape[0]
#print( "ratio of image to removed ", ratio_of_image_to_removed)
new_section_size = ratio_of_image_to_removed * ny
#print("size of multiplied section ", new_section_size, "total vertical size of new area ", new_section_size + portion_remains)
# find how much the middle section needs stretching or repeating
new_y_minus_r_portions = ny - portion_of_image_removed
ratio_of_change = new_y_minus_r_portions / portion_remains
#print("ratio of change ", ratio_of_change, portion_remains, new_y_minus_r_portions)
increase_size_of_removed_bit = portion_remains * stretch_factor
#print(ny, " -------- ", increase_size_of_removed_bit, increase_size_of_removed_bit + portion_of_image_removed)
# stretch image if it needs it
if self.get_array() is None or self.get_array().shape[0] != ny:
# slice the image into three pieces
top_part = original_image[:top_cut_location]
middle_part = original_image[top_cut_location:lower_cut_location]
lower_part = original_image[lower_cut_location:]
# stretch the middle portion when that option is selected
if middle_method == "stretch":
if new_y_minus_r_portions > 0 and new_h_minus_removed > 0 and new_h_minus_removed > portion_remains:
edited_middle_part = np.repeat(middle_part, ratio_of_new_to_old, axis=0)
else:
edited_middle_part = middle_part
if middle_method == "repeat":
edited_middle_part = middle_part.copy()
for x in range(1, int(ratio_of_new_to_old)):
edited_middle_part = np.append(edited_middle_part, middle_part, axis=0)
# put it back together and display
arr = np.vstack([top_part, edited_middle_part, lower_part])
self.set_array(arr)
super().draw(renderer, *args, **kwargs)
# define graph space
fig, ax = plt.subplots(figsize=(size_h, size_v))
for x in data_sets:
date_list = x[0]
value_list = x[1]
key_list = x[2]
if use_val_set == "last":
counted = [1]
values = [value_list[-1]]
day_names = [str(date_list[-1])]
else:
dict_of_sets = make_dict_of_sets(date_list, value_list, key_list)
counted, values, day_names = make_min_max_vals_lists(dict_of_sets)
# cycle through each value creating a correctly scaled image and placing it on the bar graph
minh = values[0]
maxh = values[0]
for days_date, h, key in zip(counted, values, day_names):
print(" -- Making bar for " + key)
# get graph extents for y limit
if h > maxh:
maxh = h
if h < minh:
minh = h
# find bounding box for image
start_bar = days_date - 0.40
end_bar = days_date + 0.40
bbox0 = Bbox.from_extents(start_bar, 0., end_bar, h)
bbox = TransformedBbox(bbox0, ax.transData)
# add image to graph
ax.add_artist(PrettyBarImage(ax, bbox, interpolation="bicubic"))
# write value above bar
if label_above == "true":
text = str(h) + "\n" + key
ax.annotate(str(h), (days_date, h), va="bottom", ha="center")
if rotate_label == "true":
ax.text(days_date, 0, str(key), rotation=45,va='top',ha='right')
else:
ax.text(days_date, 0, str(key),va='top',ha='center')
if show_axis == "false":
ax.axis('off')
plt.title(use_val_set + " " + title_extra + "\n")
ax.set_xlim(counted[0] - 0.5, counted[-1] + 0.5)
ax.set_ylim(0, maxh + 1)
plt.xticks([])
# add a pretty color to the background
if show_background == "true":
background_gradient = np.zeros((2, 2, 4))
background_gradient[:, :, :3] = [1, 1, 0]
background_gradient[:, :, 3] = [[0.1, 0.3], [0.3, 0.5]] # alpha channel
ax.imshow(background_gradient, interpolation="bicubic", zorder=0.1,
extent=(0, 1, 0, 1), transform=ax.transAxes, aspect="auto")
# save the graph and tidy up our workspace
plt.savefig(graph_path)
print("pretty day bars created and saved to " + graph_path)
plt.close(fig)
| gpl-3.0 |
yhpeng-git/mxnet | example/kaggle-ndsb1/submission_dsb.py | 15 | 4287 | from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array with probabilities for each class (columns) X images in test set (as listed in test.lst) and formats in Kaggle submission format, saves and compresses in submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
cols_to_use = columns[len(columns)-1] # keep only the last column (the image filenames)
cols_to_use = list(map(int, str(cols_to_use))) ## convert scalar to list (list() keeps this working on Python 3)
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
| apache-2.0 |
JasonNK/udacity-dlnd | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display Stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
# Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
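# Illustrative usage sketch (added for clarity; train_step is a hypothetical consumer):
#
#   for batch_features, batch_labels in batch_features_labels(features, labels, batch_size=64):
#       # every batch holds at most 64 samples; the final one may be smaller
#       train_step(batch_features, batch_labels)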
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
akionakamura/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate feature scores, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
sperez8/iLab-viz | utils.py | 1 | 30998 | from datetime import datetime, timedelta, date
from pandas import notnull
import itertools
import re
import pandas as pd
def fix_time(time_start,current_time):
"""This function fixes the timestamps used by converting them to seconds, starting at zero.
Args:
time_start (str): The time at which the first action was done.
current_time (str): The current time we are converting.
Returns:
current_time_fixed: The time of the current action converted into seconds given that the first action was done as 0 seconds.
"""
#check that the current time is later than first time stamp
if datetime.combine(date.min, current_time) >= datetime.combine(date.min, time_start):
current_time_fixed = (datetime.combine(date.min, current_time) - datetime.combine(date.min, time_start)).total_seconds()
else:
#ex: this is the case when time_start is 59:00 min and cuurent_time is 02:00 min, so we add an hour to find the duration in between
current_time_fixed = (datetime.combine(date.min, current_time) + timedelta(hours=1) - datetime.combine(date.min, time_start)).total_seconds()
return current_time_fixed
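# Worked example (illustrative, added for clarity): timestamps wrap around the hour,
# so a start of 59:00 followed by 02:00 is treated as 3 minutes later, not negative.
#
#   from datetime import time
#   fix_time(time(0, 59, 0), time(0, 59, 30))   # -> 30.0
#   fix_time(time(0, 59, 0), time(0, 2, 0))     # -> 180.0 (wrapped past the hour)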
def calculate_duration(row):
"""This function gets the duration of an action given the difference in time
between the current and next timestamp.
Args:
row (Pandas element): The row of the action for which we want to find the duration.
Returns:
duration: The difference in time in seconds between the Timeshifted and Time variables.
"""
if notnull(row['Timeshifted']): #check that this is not the last action which will have a NA Timeshifted value
#check that the time of the next action is indeed later than time of the current actin
if datetime.combine(date.min, row['Timeshifted']) >= datetime.combine(date.min, row['Time']):
duration = (datetime.combine(date.min, row['Timeshifted']) - datetime.combine(date.min, row['Time'])).total_seconds()
else:
#ex: this is the case when TimeSHifted is 59:00 min and Time is 02:00 min, so we add an hour to find the duration in between
duration = (datetime.combine(date.min, row['Timeshifted']) + timedelta(hours=1) - datetime.combine(date.min, row['Time'])).total_seconds()
else:
duration = 10 # the last action has no following timestamp, so assign a dummy duration of 10 seconds
return duration
opt_combos3 = {'none choose... all':'all',
'none all choose...':'choose...',
'all choose... none':'none',
'all none choose...':'choose...',
'choose... none all':'all',
'choose... all none':'none',
'all all all':'all',
'none none none':'none',
'choose... choose... choose...':'choose...'}
opt_combos2 = {'none all':'all',
'none choose...':'choose...',
'all none':'none',
'all choose...':'choose...',
'choose... all':'all',
'choose... none':'none',
'all all':'all',
'choose... choose...':'choose...',
'none none':'none'}
symbol_combos3 = {'x - +' : '+','x - /' : '/','x + -' : '-','x + /' : '/','x / -' : '-','x / +' : '+','- x +' : '+','- x /' : '/','- + x' : 'x','- + /' : '/','- / x' : 'x','- / +' : '+','+ x -' : '-','+ x /' : '/','+ - x' : 'x','+ - /' : '/','+ / x' : 'x','+ / -' : '-','/ x -' : '-','/ x +' : '+','/ - x' : 'x','/ - +' : '+','/ + x' : 'x','/ + -' : '-','- - -':'-','+ + +':'+','/ / /':'/','x x x':'x'}
symbol_combos2 = {'x -' : '-','x +' : '+','x /' : '/','- x' : 'x','- +' : '+','- /' : '/','+ x' : 'x','+ -' : '-','+ /' : '/','/ x' : 'x','/ -' : '-','/ +' : '+','- -' : '-','+ +' : '+','/ /' : '/','x x' : 'x'}
functions_combos2 = {"Average Average":"Average",
"Sum Sum":"Sum",
"Count Count":"Count",
"Median Median":"Median",
"Average Sum":"Average",
"Sum Sum":"Sum",
"Count Sum":"Count",
"Median Sum":"Median",
"Average Average":"Average",
"Sum Average":"Average",
"Count Average":"Average",
"Median Average":"Average",
"Average Count":"Count",
"Sum Count":"Count",
"Count Count":"Count",
"Median Count":"Count",
"Average Median":"Median",
"Sum Median":"Median",
"Count Median":"Median",
"Median Median":"Median"}
def clean_method(method):
method = method.replace("}","").replace("{","").replace("Use","")
method = re.sub(' +',' ',method) #remove extra spaces
for combo,replacement in opt_combos3.items():
method = method.replace(combo,replacement)
for combo,replacement in opt_combos2.items():
method = method.replace(combo,replacement)
for combo,replacement in symbol_combos3.items():
method = method.replace(combo,replacement)
for combo,replacement in symbol_combos2.items():
method = method.replace(combo,replacement)
for combo,replacement in functions_combos2.items():
method = method.replace(combo,replacement)
return method
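# Illustrative example of the cleaning above (the method string is made up):
#   clean_method("{Use} Average all none choose... + - 5")
#   # -> "Average choose... - 5" (modulo surrounding whitespace): braces and "Use"
#   # are stripped, and runs of selections/operators collapse to the last one chosen.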
def clean_coords(coords_brocken_up):
# since the coordinates are consecutive in time, we want to merge them to clean them up.
#For example:
# coords_brocken_up = [(0,2),(2,5),(7,2)]
# coords -> [(0,9)]
while True:
coords = merge_usage(coords_brocken_up,coords_brocken_up)
if coords == coords_brocken_up:
return coords
else:
coords_brocken_up = coords
#Using the example used for sketch.
def prepare_session(df,sessionid):
new_df = df[df['Session Id'] == sessionid]
# Next we filter out all actions with "INCORRECT" outcomes
before = new_df.shape[0]
new_df = new_df[new_df['Outcome'] == 'CORRECT']
# print "After removin 'incorrect' actions, we are left with {0} rows out of {1}".format(new_df.shape[0],before)
# We also clean up the methods removing annoying characters like "{"
new_df['Cleaned method 1'] = new_df[['Method_Recognized_1_Copied']].applymap(lambda method:clean_method(method))
new_df['Cleaned method 2'] = new_df[['Method_Recognized_2_Copied']].applymap(lambda method:clean_method(method))
# We create a column with the data for the contrasting cases
new_df['cases'] = new_df['CF(new1)'].str.replace('"','') +','+ new_df['CF(new2)'].str.replace('"','')
# Next we fix the time logs and convert them to seconds. We also recalculate the time between actions now that we have gotten rid of incorrect actions.
time_start = list(new_df['Time'])[0]
new_df['Time_seconds'] = new_df[['Time']].applymap(lambda current_time: fix_time(time_start,current_time))
new_df['Timeshifted'] = new_df[['Time']].shift(-1)
new_df['Duration'] = new_df[['Time','Timeshifted']].apply(calculate_duration, axis=1)
return new_df
def action_usage(df,column,action):
'''Given an action or method, we detect its use using a particular column
and then extract a list of time coordinates for when
they were used. These coordinates are in the format (start_time, duration)
Args:
df (Pandas dataframe): The dataframe to search in.
column (str): The column where the method or action might be logged.
action (str): The name of the action or method to search for in the column.
Returns:
A list of tuples with start times of the action and it's duration [(start1,duration1),(start2,duration2),...]
'''
return zip(df[df[column].str.contains(action,na=False)]['Time_seconds'],df[df[column].str.contains(action,na=False)]['Duration'])
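# Illustrative example of the coordinate format returned above (numbers are made up):
#   action_usage(df, 'Selection', 'submit') -> [(12.0, 3.0), (85.0, 2.0)]
# i.e. 'submit' appears 12 s into the session for 3 s, and again at 85 s for 2 s.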
def action_usage_exact(df,column,action):
'''Given an action or method, we detect its exact use (no combined action or method) using a particular column
and then extract a list of time coordinates for when
they were used. These coordinates are in the format (start_time, duration)
Args:
df (Pandas dataframe): The dataframe to search in.
column (str): The column where the method or action might be logged.
action (str): The name of the action or method to search for in the column.
Returns:
A list of tuples with start times of the action and it's duration [(start1,duration1),(start2,duration2),...]
'''
return zip(df[df[column].str.match(action,as_indexer=True)]['Time_seconds'],df[df[column].str.match(action,as_indexer=True)]['Duration'])
def merge_usage(x,y):
'''
Given two lists of coordinates, we merged them and return the new coordinates.
These coordinates are in the format (start_time, duration)
Args:
x (list): One set of coordinates
y (list): A second set of coordinates
Returns:
A list of tuples with merged start and duration coordinates [(start1,duration1),(start2,duration2),...]
For example:
x = [(0,1),(2,3),(10,3)] #0,2,3,4,10,11,12
y = [(0,2),(3,1),(9,2),(12,2)] #0,1,3,9,10,12,13
then merged(x,y) => [(0, 2), (2, 3), (9, 5)] #0,1,2,3,4,9,10,11,12,13
'''
x = x+y #we put all the coordinates in one list
x = list(set(x)) #remove duplicate coordinates
x.sort() #sort them by start time
if len(x)==1:
return x
merged = []
#for pairs of coordinates, we check if we can merged them
for i,(s1,d1) in enumerate(x):
if i != len(x)-1:
s2,d2 = x[i+1] #get next coordinates
# print s1,d1,s2,d2
if s1 == s2: #if same start times, find max duration
merged.append((s1,max(d1,d2)))
x.remove((s2,d2))
elif s1+d1 >= s2+d2: #if one coordinate bounds the other
merged.append((s1,d1)) #we add that coordinate
x.remove((s2,d2)) #and remove the other
elif s1+d1 >= s2: # if they overlap
new_duration = d2 + s2-s1 #we calculate a new duration
merged.append((s1,new_duration)) #add the new coordinate with earliest start time
x.remove((s2,d2)) #and remove the other
else:
merged.append((s1,d1))
else:
#these are the last coordinates of x and haven't been merged yet,
# so we try to merge them with the previous coordinates
if s1 <= merged[-1][0]+merged[-1][1]:
new_start = merged[-1][0]
new_duration = d1 + s1 - merged[-1][0]
merged[-1] = (new_start,new_duration) #extend the duration of the last coordinate
else: #if it fails, then there is no overlap and we merge them
merged.append((s1,d1))
return merged
def intersect_usage(x,y):
'''
Given two lists of coordinates, we find the intersect of comon time coordinates and return the new coordinates.
These coordinates are in the format (start_time, duration)
Args:
x (list): One set of coordinates
y (list): A second set of coordinates
Returns:
A list of tuples with a intersect of start and duration coordinates [(start1,duration1),(start2,duration2),...]
For example:
x = [(0,1),(2,3),(10,3)] #0,2,3,4,10,11,12
y = [(0,2),(3,1),(9,2),(12,2)] #0,1,3,9,10,12,13
then intersect_usage(x,y) -> [(0, 1), (3, 1), (10, 1), (12, 1)] #0,3,10,12
'''
x.sort() #sort them by start time
y.sort()
intersect = []
#for pairs of coordinates, we check if we can capture intersect
while len(x) > 0 and len(y) > 0:
(sx,dx) = x[0]
(sy,dy) = y[0]
if sx == sy: #if same start times, find min duration
intersect.append((sx,min(dx,dy)))
if dx<dy:
x.pop(0)
else:
y.pop(0)
elif sx < sy and sx+dx > sy: # if they overlap
if sx+dx >= sy+dy: #if one coordinate bounds the other
intersect.append((sy,dy)) #we add that inner coordinate
y.pop(0) #and remove it
else: #if no bounding, then just overlap
intersect.append((sy,dx - (sy-sx))) #add the new coordinate with latest start time
x.pop(0) #and remove the earliest one
elif sy < sx and sy+dy > sx: # if they overlap (opposite scenario)
if sy+dy >= sx+dx: #if one coordinate bounds the other (opposite scenario)
intersect.append((sx,dx)) #we add that inner coordinate
x.pop(0) #and remove it
else:
intersect.append((sx,dy - (sx-sy))) #add the new coordinate with latest start time
y.pop(0) #and remove the earliest one
else:
#there was no intersect so we remove the earliest coordinate
if sx < sy:
x.pop(0)
else:
y.pop(0)
return intersect
def merge_method_usage(df, pattern):
# For merging whenever an action is used in either the right or left set of the case
m1 = action_usage(df,'Cleaned method 1',pattern)
m2 = action_usage(df,'Cleaned method 2',pattern)
return merge_usage(m1,m2)
def all_cases(df):
'''Given a dataframe with students' activity, we extract all
the contrasting cases they were given as well as starting time and
length of time for which they were working on that case.
Args:
df (Pandas dataframe): The dataframe to search in.
Returns:
coordinates: a dictionary where the keys are the cases and values are time coordinates.
Cases are in the format ('1 2 3 6','2 3 6 7').
Time coordinates are in the format (start_time, duration)
'''
coordinates = {}
#get all possible cases
raw_cases = list(set(df['cases']))
for raw_case in raw_cases:
#clean them up:
case = tuple(raw_case.split(','))
#we get time coordinates the way we always do
coords_brocken_up = action_usage(df,'cases',raw_case)
coords = clean_coords(coords_brocken_up)
#coords should now be a list with 1 item of formt [(start,duration)]
if len(coords) == 1:
coordinates[case] = coords[0]
else:
raise ValueError('This case seems to be used more than once: ' + str(case))
return coordinates
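# Illustrative shape of the dictionary returned above (times are made up):
#   all_cases(df) -> {('1 2 3 6', '2 3 6 7'): (0.0, 240.0),
#                     ('2 4 5 8', '1 4 5 9'): (240.0, 310.0)}
# i.e. each contrasting case maps to when it appeared and how long it was worked on.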
REGEX_SINGLE_VALUE_FIRST = "st\d \d(?:$|(?:\sst)|(?:\s[\-\+x\/]\s[A-Z]))"
# matches:
# st1 5
# st1 5 + Ave...
# st1 5 - Cou...
REGEX_SINGLE_VALUE_SECOND = "st\d [A-Z][\sa-z]+ [\-\+x\/] \d(?:$|(?:\s?st))"
# matches:
# st1 Average all + 5 st
# st2 Count all - 5
def single_value_usage(df):
usage= []
method1 = action_usage(df,'Cleaned method 1',REGEX_SINGLE_VALUE_FIRST)
usage.extend(action_usage(df,'Cleaned method 2',REGEX_SINGLE_VALUE_FIRST))
usage.extend(action_usage(df,'Cleaned method 1',REGEX_SINGLE_VALUE_SECOND))
usage.extend(action_usage(df,'Cleaned method 2',REGEX_SINGLE_VALUE_SECOND))
return clean_coords(usage)
REGEX_AVERAGE = "(?:Average all)|(?:Average choose\.\.\.(?:\s[(?:{{0}})])+)"
REGEX_SUM = "(?:Sum all)|(?:Sum choose\.\.\.(?:\s[(?:{{0}})])+)"
REGEX_MEDIAN = "(?:Median all)|(?:Median choose\.\.\.(?:\s[(?:{{0}})])+)"
# matches:
# Average all
# Sum choose... x y z #where x,y,z are numbers from the case
def central_tendency_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
lcase = [str(int(x)) for x in case[0].split(" ")]
rcase = [str(int(x)) for x in case[1].split(" ")]
lcase.sort()
rcase.sort()
average = action_usage(df, 'Cleaned method 1' ,REGEX_AVERAGE.format('|'.join(lcase)))
sumall = action_usage(df, 'Cleaned method 1' ,REGEX_SUM.format('|'.join(lcase)))
median = action_usage(df, 'Cleaned method 1' ,REGEX_MEDIAN.format('|'.join(lcase)))
merging = merge_usage(average,sumall)
cent1 = merge_usage(merging, median)
average = action_usage(df, 'Cleaned method 2' ,REGEX_AVERAGE.format('|'.join(rcase)))
sumall = action_usage(df, 'Cleaned method 2' ,REGEX_SUM.format('|'.join(rcase)))
median = action_usage(df, 'Cleaned method 2' ,REGEX_MEDIAN.format('|'.join(rcase)))
merging = merge_usage(average,sumall)
cent2 = merge_usage(merging, median)
# and keep only the times that fall within the current case
cent1_for_case = intersect_usage(cent1,[coords])
cent2_for_case = intersect_usage(cent2,[coords])
# Merge when it's used on both cases
usage.extend(clean_coords(merge_usage(cent1_for_case,cent2_for_case)))
usage.sort()
return usage
def regex_count_gaps(gapvalues):
pattern = "Count\s?( choose\.\.\.)?(\s[({0})])+".format('|'.join(gapvalues))
return pattern
# matches:
# Count choose... x y z
# Count x y z
def count_gaps_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
lcase = case[0].split(" ")
rcase = case[1].split(" ")
lcase.sort()
rcase.sort()
#get gap values for the regex
gapvalues_left = set(range( int(min(lcase)) , int(max(lcase)) )) - set([int(x) for x in lcase])
gapvalues_right = set(range( int(min(rcase)) , int(max(rcase)) )) - set([int(x) for x in rcase])
#get all times that the range is used somewhere the method
if len(gapvalues_left)>0:
re_left = regex_count_gaps([str(x) for x in gapvalues_left])
range1 = action_usage(df,'Cleaned method 1',re_left)
else:
range1 = action_usage(df,'Cleaned method 1',"Count none")
if len(gapvalues_right)>0:
re_right = regex_count_gaps([str(x) for x in gapvalues_right])
range2 = action_usage(df,'Cleaned method 2',re_right)
else:
range2 = action_usage(df,'Cleaned method 2',"Count none")
# and keep only the times that fall within the current case
range1_for_case = intersect_usage(range1,[coords])
range2_for_case = intersect_usage(range2,[coords])
# Merge when it's used on both cases
usage.extend(clean_coords(merge_usage(range1_for_case,range2_for_case)))
usage.sort()
return usage
def evaluation_steps_usage(df):
# submit_usage = action_usage(df,"Selection","submit")
evaluation_usage = action_usage(df,"Selection","evaluation")
checkIntuition_usage = action_usage(df,"Selection","checkIntuition")
##do some merging
merged = merge_usage(evaluation_usage, checkIntuition_usage)
return merged
def case_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = 30+20+30
usage.append((start,end))
return usage
# def regex_extrapolated_range(case_max1,case_min1,case_max2,case_min2):
# pattern = "(?:st\d+ {0} \- {1} st\d+ {2} \- {3}|st\d+ {2} \- {3} st\d+ {0} \- {1}) st3 Step[12] [x\+\-\\\\] Step[12]".format(case_max1,case_min1,case_max2,case_min2)
# return pattern
# def extrapolated_range_usage(df):
# usage = []
# cases = all_cases(df)
# for case,coords in cases.items():
# start = coords[0]
# end = coords[1]
# lcase = case[0].split(" ")
# rcase = case[1].split(" ")
# lcase.sort()
# rcase.sort()
# #find min and maxes of cases for the regex
# lmin1,lmin2,lmax2,lmax1 = lcase[0],lcase[1],lcase[-2],lcase[-1]
# rmin1,rmin2,rmax2,rmax1 = rcase[0],rcase[1],rcase[-2],rcase[-1]
# #get all times that the range is used somewhere the method
# range1 = action_usage(df,'Cleaned method 1',regex_extrapolated_range(lmax1,lmin1,lmax2,lmin2))
# range2 = action_usage(df,'Cleaned method 2',regex_extrapolated_range(rmax1,rmin1,rmax2,rmin2))
# # print case, coords[0], coords[0]+coords[1]
# # print "rangel1:", regex_extrapolated_range(lmin1,lmax1)
# # print "rangel2:", regex_extrapolated_range(lmin2,lmax2)
# # print "ranger1:", regex_extrapolated_range(rmin1,rmax1)
# # print "ranger2:", regex_extrapolated_range(rmin2,rmax2)
# # and keep only the times that fall within the current case
# range1_for_case = intersect_usage(range1,[coords])
# range2_for_case = intersect_usage(range2,[coords])
# # Merge when it's used on both cases
# usage.extend(clean_coords(merge_usage(range1_for_case,range2_for_case)))
# usage.sort()
# return usage
def regex_range(case_min,case_max):
pattern = '{0} \- {1}'.format(case_max,case_min)
return pattern
#TO DO
# any range that's not min max
def range_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
#find min and maxes of cases for the regex
lmin,lmax = min(case[0].split(" ")),max(case[0].split(" "))
rmin,rmax = min(case[1].split(" ")),max(case[1].split(" "))
#get all times that the range is used
range1 = action_usage(df,'Cleaned method 1',regex_range(lmin,lmax))
range2 = action_usage(df,'Cleaned method 2',regex_range(rmin,rmax))
#keep only the times that fall within the current case
range1_for_case = intersect_usage(range1,[coords])
range2_for_case = intersect_usage(range2,[coords])
# Merge when it's used on both cases
usage.extend(clean_coords(merge_usage(range1_for_case,range2_for_case)))
usage.sort()
return usage
def regex_distance(v1,v2):
pattern = '{0} \- {1}'.format(v1,v2)
return pattern
# matches:
# x - y # where z and y are case numbers
def distance_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
lcase = case[0].split(" ")
rcase = case[1].split(" ")
left_values = set([int(x) for x in lcase])
right_values = set([int(x) for x in rcase])
distance1 = []
distance2 = []
lmin,lmax = min(left_values),max(left_values)
rmin,rmax = min(right_values),max(right_values)
for v1,v2 in list(itertools.combinations(left_values, 2)):
if (v1 == lmax and v2== lmin): #this is range so we ignore
continue
if (v2 == lmax and v1== lmin): #this is range so we ignore
continue
else:
distance1 = merge_usage(distance1,action_usage(df,'Cleaned method 1',regex_distance(v1,v2)))
distance1 = merge_usage(distance1,action_usage(df,'Cleaned method 1',regex_distance(v2,v1)))
for v1,v2 in list(itertools.combinations(right_values, 2)):
if (v1 == rmax and v2== rmin):
continue
if (v2 == rmax and v1== rmin):
continue
else:
distance2 = merge_usage(distance2,action_usage(df,'Cleaned method 2',regex_distance(v1,v2)))
distance2 = merge_usage(distance2,action_usage(df,'Cleaned method 2',regex_distance(v2,v1)))
# and keep only the times that fall within the current case
distance1_for_case = intersect_usage(distance1,[coords])
distance2_for_case = intersect_usage(distance2,[coords])
# Merge when it's used on both cases
usage.extend(clean_coords(merge_usage(distance1_for_case,distance2_for_case)))
usage.sort()
return usage
build_actions = ["add\d",
"button\d\_\d",
"delete\d",
"deleteAll\d",
"function\d",
"operator\d\_\d",
"pointsSelection\d",
"Selection",
"step\d\_\d"]
def build_events(df):
usage = []
for re_build in build_actions:
building = action_usage(df,'Selection',re_build)
usage.extend(building)
#since these are actions - not episodes, we give them all a duration of 2 seconds
usage = [(x,2) for x,y in usage]
usage = clean_coords(usage)
usage.sort()
return usage
def regex_all_numbers(case_numbers):
return ''.join(["(?=.*"+x+")" for x in case_numbers])
REGEX_COUNT_ALL = "Count (?:all)|(?:choose\.\.\.{0})"
def count_all_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
lcase = [str(int(x)) for x in case[0].split(" ")]
rcase = [str(int(x)) for x in case[1].split(" ")]
lcase.sort()
rcase.sort()
count_left = action_usage(df, 'Cleaned method 1' ,REGEX_COUNT_ALL.format(regex_all_numbers(lcase)))
count_right = action_usage(df, 'Cleaned method 2' ,REGEX_COUNT_ALL.format(regex_all_numbers(rcase)))
count_case_left = intersect_usage(count_left,[coords])
count_case_right = intersect_usage(count_right,[coords])
usage.extend(clean_coords(merge_usage(count_case_right,count_case_left)))
return usage
def multiplication_usage(df):
return merge_method_usage(df,'[a-zA-Z0-9(?: all)(?: choose)\.]+ x [a-zA-Z0-9(?: all)(?: choose)\.]+')
def addition_usage(df):
return merge_method_usage(df,'[a-zA-Z0-9(?: all)(?: choose)\.]+ \+ [a-zA-Z0-9(?: all)(?: choose)\.]+')
# matches:
# Count choose... 2 3 x Sum all
# Average all + Sum all
# Count choose... 2 3 x 4
def combo_central_tendency_usage(df):
usage = []
cases = all_cases(df)
for case,coords in cases.items():
start = coords[0]
end = coords[1]
lcase = [str(int(x)) for x in case[0].split(" ")]
rcase = [str(int(x)) for x in case[1].split(" ")]
lcase.sort()
rcase.sort()
average = action_usage(df, 'Cleaned method 1' ,REGEX_AVERAGE.format('|'.join(lcase)))
sumall = action_usage(df, 'Cleaned method 1' ,REGEX_SUM.format('|'.join(lcase)))
median = action_usage(df, 'Cleaned method 1' ,REGEX_MEDIAN.format('|'.join(lcase)))
combo_cent1 = []
# find any intersections of a combo of central tendency methods
for c1,c2 in list(itertools.combinations([average,sumall,median], 2)):
combo_cent1.extend(intersect_usage(c1,c2))
average = action_usage(df, 'Cleaned method 2' ,REGEX_AVERAGE.format('|'.join(rcase)))
sumall = action_usage(df, 'Cleaned method 2' ,REGEX_SUM.format('|'.join(rcase)))
median = action_usage(df, 'Cleaned method 2' ,REGEX_MEDIAN.format('|'.join(rcase)))
combo_cent2 = []
# find any intersections of a combo of central tendency methods
for c1,c2 in list(itertools.combinations([average,sumall,median], 2)):
combo_cent2.extend(intersect_usage(c1,c2))
# and keep only the times that fall within the current case
combo_cent1_for_case = intersect_usage(combo_cent1,[coords])
combo_cent2_for_case = intersect_usage(combo_cent2,[coords])
# Merge when it's used on both cases
usage.extend(clean_coords(merge_usage(combo_cent1_for_case,combo_cent2_for_case)))
usage.sort()
return usage
def other_usage(df):
mult = multiplication_usage(df)
# add = addition_usage(df)
combo_cent = combo_central_tendency_usage(df)
usage = merge_usage(mult,combo_cent)
# usage.extend(add)
all_methods = [single_value_usage,central_tendency_usage,range_usage,distance_usage,count_gaps_usage]
for m1,m2 in list(itertools.combinations(all_methods, 2)):
usage.extend(intersect_usage(m1(df),m2(df)))
return usage
def get_key_ideas(df):
def format_time(t):
mins = int(t/60)
secs = int(t-mins*60)
return '{0}:{1}'.format(mins,secs)
#merge method from right and left side
df['Cleaned joined methods'] = df['Cleaned method 1'].map(str) + ' | ' + df['Cleaned method 2']
#edit actions so weither they are from the right side or left doesn't matter
# ie. delete1 and delete2 -> delete
df['Selection_unsided'] = [s.replace('1','').replace('2','') for s in list(df['Selection'].map(str))]
#remove all consecutive duplicate actions
# ie. delete delete delete -> delete NaN NaN
df['Selection_unsided'] = df['Selection_unsided'].loc[df['Selection_unsided'].shift() != df['Selection_unsided']]
#remove all consecutive duplicate methods
df['Cleaned joined methods'] = df['Cleaned joined methods'].loc[df['Cleaned joined methods'].shift() != df['Cleaned joined methods']]
#shift the Selection (or student action) so that we can find their methods BEFORE they delete or submit.
df['Selection_unsided_shifted'] = df['Selection_unsided'].shift(-1)
#Now we can find all submitted ideas on first submit and first delete
# we don't need to make it as a "set" anymore!
# submitted_ideas = set(df[df['Selection'].str.contains('submit',na=False)]['Cleaned joined methods'])
# deleted_all_ideas = set(df[df['Selection'].str.contains('deleteAll',na=False)]['Cleaned joined methods'])
submitted_ideas = df[df['Selection_unsided_shifted'].str.contains('submit',na=False)][['Selection_unsided_shifted','Time_seconds','cases','Cleaned joined methods']]
deleted_ideas = df[df['Selection_unsided_shifted'].str.contains('delete',na=False)][['Selection_unsided_shifted','Time_seconds','cases','Cleaned joined methods']]
ideas = pd.concat([deleted_ideas,submitted_ideas])
#remove empty methods
ideas = ideas[ideas['Cleaned joined methods'].str.contains("st1 | st1", regex=False) == False]
ideas.replace(',',' | ',regex=True, inplace=True)
#Let's add rows to separate cases:
raw_cases = list(set(df['cases']))
num_cases = len(raw_cases)
df_cases = pd.DataFrame()
df_cases['Selection_unsided_shifted'] = ['get new case']*num_cases
df_cases['Time_seconds'] = [action_usage(df,'cases',case)[0][0] for case in raw_cases]
df_cases['cases'] = ['-']*num_cases
df_cases['Cleaned joined methods'] = ['-']*num_cases
ideas = pd.concat([ideas, df_cases])
#sort by time and clean format the time in minutes and seconds
ideas.sort_values(by='Time_seconds',inplace=True)
ideas['timestamp'] = ideas[['Time_seconds']].applymap(lambda t: format_time(t))
ideas.reset_index(drop=True, inplace=True)
ideas.rename(columns = {'Selection_unsided_shifted':'action','Cleaned joined methods':'tried methods'}, inplace = True)
return ideas[['action','timestamp','cases','tried methods']] | gpl-3.0 |
shubham0d/smc | salvus/sage_salvus.py | 1 | 120803 | ##################################################################################
# #
# Extra code that the Salvus server makes available in the running Sage session. #
# #
##################################################################################
#########################################################################################
# Copyright (C) 2013 William Stein <wstein@gmail.com> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
import copy, os, sys, types
# This reduces a lot of confusion for Sage worksheets -- people expect
# to be able to import from the current working directory.
sys.path.append('.')
salvus = None
import json
from uuid import uuid4
def uuid():
return str(uuid4())
##########################################################################
# New function interact implementation
##########################################################################
import inspect
interacts = {}
def jsonable(x):
"""
Given any object x, make a JSON-able version of x, doing as best we can.
For some objects, sage as Sage integers, this works well. For other
objects which make no sense in Javascript, we get a string.
"""
import sage.all
try:
json.dumps(x)
return x
except:
if isinstance(x, (sage.all.Integer)):
return int(x)
else:
return str(x)
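# A few illustrative conversions (added for clarity; assumes a running Sage session):
#   jsonable(Integer(5)) # -> 5, a plain Python int
#   jsonable({'a': 1}) # -> {'a': 1}, already JSON-serializable so returned unchanged
#   jsonable(sqrt(2)) # -> 'sqrt(2)', symbolic values fall back to str()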
class InteractCell(object):
def __init__(self, f, layout=None, width=None, style=None,
update_args=None, auto_update=True,
flicker=False, output=True):
"""
Given a function f, create an object that describes an interact
for working with f interactively.
INPUT:
- `f` -- Python function
- ``width`` -- (default: None) overall width of the interact canvas
- ``style`` -- (default: None) extra CSS style to apply to canvas
- ``update_args`` -- (default: None) only call f if one of the args in
this list of strings changes.
- ``auto_update`` -- (default: True) call f every time an input changes
(or one of the args in update_args).
- ``flicker`` -- (default: False) if False, the output part of the cell
never shrinks; it can only grow, which alleviates flicker.
- ``output`` -- (default: True) if False, do not automatically
provide any area to display output.
"""
self._flicker = flicker
self._output = output
self._uuid = uuid()
# Prevent garbage collection until client specifically requests it,
# since we want to be able to store state.
interacts[self._uuid] = self
self._f = f
self._width = jsonable(width)
self._style = str(style)
(args, varargs, varkw, defaults) = inspect.getargspec(f)
if defaults is None:
defaults = []
n = len(args) - len(defaults)
self._controls = dict([(arg, interact_control(arg, defaults[i-n] if i >= n else None))
for i, arg in enumerate(args)])
self._last_vals = {}
for arg in args:
self._last_vals[arg] = self._controls[arg].default()
self._ordered_args = args
self._args = set(args)
if isinstance(layout, dict):
# Implement the layout = {'top':, 'bottom':, 'left':,
# 'right':} dictionary option that is in the Sage
# notebook. I personally think it is really awkward and
# unsuable, but there may be many interacts out there that
# use it.
# Example layout={'top': [['a', 'b'], ['x', 'y']], 'left': [['c']], 'bottom': [['d']]}
top = layout.get('top', [])
bottom = layout.get('bottom', [])
left = layout.get('left', [])
right = layout.get('right', [])
new_layout = []
for row in top:
new_layout.append(row)
if len(left) > 0 and len(right) > 0:
new_layout.append(left[0] + [''] + right[0])
del left[0]
del right[0]
elif len(left) > 0 and len(right) == 0:
new_layout.append(left[0] + [''])
del left[0]
elif len(left) == 0 and len(right) > 0:
new_layout.append([''] + right[0])
del right[0]
i = 0
while len(left) > 0 and len(right) > 0:
new_layout.append(left[0] + ['_salvus_'] + right[0])
del left[0]
del right[0]
while len(left) > 0:
new_layout.append(left[0])
del left[0]
while len(right) > 0:
new_layout.append(right[0])
del right[0]
for row in bottom:
new_layout.append(row)
layout = new_layout
if layout is None:
layout = [[(str(arg), 12, None)] for arg in self._ordered_args]
else:
try:
v = []
for row in layout:
new_row = []
for x in row:
if isinstance(x, str):
x = (x,)
if len(x) == 1:
new_row.append((str(x[0]), 12//len(row), None))
elif len(x) == 2:
new_row.append((str(x[0]), int(x[1]), None))
elif len(x) == 3:
new_row.append((str(x[0]), int(x[1]), str(x[2])))
v.append(new_row)
layout = v
except:
raise ValueError, "layout must be None or a list of tuples (variable_name, width, [optional label]), where width is an integer between 1 and 12, variable_name is a string, and label is a string. The widths in each row must add up to at most 12. The empty string '' denotes the output area."
# Append a row for any remaining controls:
layout_vars = set(sum([[x[0] for x in row] for row in layout],[]))
for v in args:
if v not in layout_vars:
layout.append([(v, 12, None)])
if self._output:
if '' not in layout_vars:
layout.append([('', 12, None)])
self._layout = layout
# TODO -- this is UGLY
if not auto_update:
c = button('Update')
c._opts['var'] = 'auto_update'
self._controls['auto_update'] = c
self._ordered_args.append("auto_update")
layout.append([('auto_update',2)])
update_args = ['auto_update']
self._update_args = update_args
def jsonable(self):
"""
Return a JSON-able description of this interact, which the client
can use for laying out controls.
"""
X = {'controls':[self._controls[arg].jsonable() for arg in self._ordered_args], 'id':self._uuid}
if self._width is not None:
X['width'] = self._width
if self._layout is not None:
X['layout'] = self._layout
X['style'] = self._style
X['flicker'] = self._flicker
return X
def __call__(self, vals):
"""
Call self._f with inputs specified by vals. Any input variables not
specified in vals will have the value they had last time.
"""
self.changed = [str(x) for x in vals.keys()]
for k, v in vals.iteritems():
x = self._controls[k](v)
self._last_vals[k] = x
if self._update_args is not None:
do_it = False
for v in self._update_args:
if v in self.changed:
do_it = True
if not do_it:
return
interact_exec_stack.append(self)
try:
self._f(**dict([(k,self._last_vals[k]) for k in self._args]))
finally:
interact_exec_stack.pop()
class InteractFunction(object):
def __init__(self, interact_cell):
self.__dict__['interact_cell'] = interact_cell
def __call__(self, **kwds):
salvus.clear()
for arg, value in kwds.iteritems():
self.__setattr__(arg, value)
return self.interact_cell(kwds)
def __setattr__(self, arg, value):
I = self.__dict__['interact_cell']
if arg in I._controls and not isinstance(value, control):
# setting value of existing control
v = I._controls[arg].convert_to_client(value)
desc = {'var':arg, 'default':v}
I._last_vals[arg] = value
else:
# create a new control
new_control = interact_control(arg, value)
I._controls[arg] = new_control
desc = new_control.jsonable()
# set the id of the containing interact
desc['id'] = I._uuid
salvus.javascript("worksheet.set_interact_var(obj)", obj=jsonable(desc))
def __getattr__(self, arg):
I = self.__dict__['interact_cell']
try:
return I._last_vals[arg]
except Exception, err:
print err
raise AttributeError("no interact control corresponding to input variable '%s'"%arg)
def __delattr__(self, arg):
I = self.__dict__['interact_cell']
try:
del I._controls[arg]
except KeyError:
pass
desc = {'id':I._uuid, 'name':arg}
salvus.javascript("worksheet.del_interact_var(obj)", obj=jsonable(desc))
def changed(self):
"""
Return the variables that changed since last evaluation of the interact function
body. [SALVUS only]
For example::
@interact
def f(n=True, m=False, xyz=[1,2,3]):
print n, m, xyz, interact.changed()
"""
return self.__dict__['interact_cell'].changed
class _interact_layout:
def __init__(self, *args):
self._args = args
def __call__(self, f):
return interact(f, *self._args)
class Interact(object):
"""
Use interact to create interactive worksheet cells with sliders,
text boxes, radio buttons, check boxes, color selectors, and more.
Put ``@interact`` on the line before a function definition in a
cell by itself, and choose appropriate defaults for the variable
names to determine the types of controls (see tables below). You
may also put ``@interact(layout=...)`` to control the layout of
controls. Within the function, you may explicitly set the value
of the control corresponding to a variable foo to bar by typing
interact.foo = bar.
Type "interact.controls.[tab]" to get access to all of the controls.
INPUT:
- ``f`` -- function
- ``width`` -- number, or string such as '80%', '300px', '20em'.
- ``style`` -- CSS style string, which allows you to change the border,
background color, etc., of the interact.
- ``update_args`` -- (default: None); list of strings, so that
only changing the corresponding controls causes the function to
be re-evaluated; changing other controls will not cause an update.
- ``auto_update`` -- (default: True); if False, a button labeled
'Update' will appear which you can click on to re-evaluate.
- ``layout`` -- (default: one control per row) a list [row0,
row1, ...] of lists of tuples row0 = [(var_name, width,
label), ...], where the var_name's are strings, the widths
must add up to at most 12, and the label is optional. This
will lay out all of the controls and output using Twitter
Bootstrap's "Fluid layout", with spans corresponding
to the widths. Use var_name='' to specify where the output
goes, if you don't want it to go last. You may specify entries for
controls that you will create later using interact.var_name = foo.
NOTES: The flicker and layout options above are only in SALVUS.
For backwards compatibility with the Sage notebook, if layout
is a dictionary (with keys 'top', 'bottom', 'left', 'right'),
then the appropriate layout will be rendered as it used to be
in the Sage notebook.
OUTPUT:
- creates an interactive control.
AUTOMATIC CONTROL RULES
-----------------------
There are also some defaults that allow you to make controls
automatically without having to explicitly specify them. E.g.,
you can make ``x`` a continuous slider of values between ``u`` and
``v`` by just writing ``x=(u,v)`` in the argument list.
- ``u`` - blank input_box
- ``u=elt`` - input_box with ``default=element``, unless other rule below
- ``u=(umin,umax)`` - continuous slider (really `100` steps)
- ``u=(umin,umax,du)`` - slider with step size ``du``
- ``u=list`` - buttons if ``len(list)`` is at most `5`; otherwise, a drop down
- ``u=generator`` - a slider (up to `10000` steps)
- ``u=bool`` - a checkbox
- ``u=Color('blue')`` - a color selector; returns ``Color`` object
- ``u=matrix`` - an ``input_grid`` with ``to_value`` set to
``matrix.parent()`` and default values given by the matrix
- ``u=(default, v)`` - ``v`` anything as above, with given ``default`` value
- ``u=(label, v)`` - ``v`` anything as above, with given ``label`` (a string)
EXAMPLES:
The layout option::
@interact(layout={'top': [['a', 'b']], 'left': [['c']],
'bottom': [['d']], 'right':[['e']]})
def _(a=x^2, b=(0..20), c=100, d=x+1, e=sin(2)):
print a+b+c+d+e
We illustrate some features that are only in Salvus, not in the
Sage cell server or Sage notebook.
You can set the value of a control called foo to 100 using
interact.foo=100. For example::
@interact
def f(n=20, twice=None):
interact.twice = int(n)*2
In this example, we create and delete multiple controls depending
on properties of the input::
@interact
def f(n=20, **kwds):
print kwds
n = Integer(n)
if n % 2 == 1:
del interact.half
else:
interact.half = input_box(n/2, readonly=True)
if n.is_prime():
interact.is_prime = input_box('True', readonly=True)
else:
del interact.is_prime
You can access the value of a control associated to a variable foo
that you create using interact.foo, and check whether there is a
control associated to a given variable name using hasattr::
@interact
def f():
if not hasattr(interact, 'foo'):
interact.foo = 'hello'
else:
print interact.foo
An indecisive interact::
@interact
def f(n=selector(['yes', 'no'])):
for i in range(5):
interact.n = i%2
sleep(.2)
We use the style option to make a holiday interact::
@interact(width=25,
style="background-color:lightgreen; border:5px dashed red;")
def f(x=button('Merry ...',width=20)):
pass
We make a little box that can be dragged around, resized, and is
updated via a computation (in this case, counting primes)::
@interact(width=30,
style="background-color:lightorange; position:absolute; z-index:1000; box-shadow : 8px 8px 4px #888;")
def f(prime=text_control(label="Counting primes: ")):
salvus.javascript("cell.element.closest('.salvus-cell-output-interact').draggable().resizable()")
p = 2
c = 1
while True:
interact.prime = '%s, %.2f'%(p, float(c)/p)
p = next_prime(p)
c += 1
sleep(.25)
"""
def __call__(self, f=None, layout=None, width=None, style=None, update_args=None, auto_update=True, flicker=False, output=True):
if f is None:
return _interact_layout(layout, width, style, update_args, auto_update, flicker)
else:
return salvus.interact(f, layout=layout, width=width, style=style,
update_args=update_args, auto_update=auto_update, flicker=flicker, output=output)
def __setattr__(self, arg, value):
I = interact_exec_stack[-1]
if arg in I._controls and not isinstance(value, control):
# setting value of existing control
v = I._controls[arg].convert_to_client(value)
desc = {'var':arg, 'default':v}
I._last_vals[arg] = value
else:
# create a new control
new_control = interact_control(arg, value)
I._controls[arg] = new_control
desc = new_control.jsonable()
desc['id'] = I._uuid
salvus.javascript("worksheet.set_interact_var(obj)", obj=desc)
def __delattr__(self, arg):
I = interact_exec_stack[-1]
try:
del I._controls[arg]
except KeyError:
pass
desc = {'id': I._uuid, 'name': arg}
salvus.javascript("worksheet.del_interact_var(obj)", obj=jsonable(desc))
def __getattr__(self, arg):
try:
return interact_exec_stack[-1]._last_vals[arg]
except Exception, err:
raise AttributeError("no interact control corresponding to input variable '%s'"%arg)
def changed(self):
"""
Return the variables that changed since last evaluation of the interact function
body. [SALVUS only]
For example::
@interact
def f(n=True, m=False, xyz=[1,2,3]):
print n, m, xyz, interact.changed()
"""
return interact_exec_stack[-1].changed
interact = Interact()
interact_exec_stack = []
class control:
def __init__(self, control_type, opts, repr, convert_from_client=None, convert_to_client=jsonable):
# The type of the control -- a string, used for CSS selectors, switches, etc.
self._control_type = control_type
# The options that define the control -- passed to client
self._opts = dict(opts)
# Used to print the control to a string.
self._repr = repr
# Callable that the control may use in converting from JSON
self._convert_from_client = convert_from_client
self._convert_to_client = convert_to_client
self._last_value = self._opts['default']
def convert_to_client(self, value):
try:
return self._convert_to_client(value)
except Exception, err:
sys.stderr.write("%s -- %s\n"%(err, self))
sys.stderr.flush()
return jsonable(value)
def __call__(self, obj):
"""
Convert JSON-able object returned from client to describe
value of this control.
"""
if self._convert_from_client is not None:
try:
x = self._convert_from_client(obj)
except Exception, err:
sys.stderr.write("%s -- %s\n"%(err, self))
sys.stderr.flush()
x = self._last_value
else:
x = obj
self._last_value = x
return x
def __repr__(self):
return self._repr
def label(self):
"""Return the label of this control."""
return self._opts['label']
def default(self):
"""Return default value of this control."""
return self(self._opts['default'])
def type(self):
"""Return type that values of this control are coerced to."""
return self._opts['type']
def jsonable(self):
"""Return JSON-able object the client browser uses to render the control."""
X = {'control_type':self._control_type}
for k, v in self._opts.iteritems():
X[k] = jsonable(v)
return X
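# A minimal usage sketch of the control class above (assuming a live Salvus
# session with this module loaded; paste into a worksheet cell to try it):
#
#     c = input_box(default=5, label='n', type=int)
#     c.jsonable()     # the dict sent to the client to render the control
#     c('7')           # convert the client's string back through ParseValue -> 7
#     c.default()      # -> 5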
import types
def list_of_first_n(v, n):
"""Given an iterator v, return first n elements it produces as a list."""
if not hasattr(v, 'next'):
v = v.__iter__()
w = []
while n > 0:
try:
w.append(v.next())
except StopIteration:
return w
n -= 1
return w
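# A minimal sketch of list_of_first_n, which is what turns a generator default
# like ``u=(i^2 for i in [0..])`` into the finite list of values a slider can
# use (assuming a Python 2 / Sage session with this module loaded):
#
#     def squares():
#         k = 0
#         while True:
#             yield k*k
#             k += 1
#     list_of_first_n(squares(), 5)      # -> [0, 1, 4, 9, 16]
#     list_of_first_n(iter([1, 2]), 5)   # stops early -> [1, 2]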
def automatic_control(default):
from sage.all import Color
from sage.structure.element import is_Matrix
label = None
default_value = None
for _ in range(2):
if isinstance(default, tuple) and len(default) == 2 and isinstance(default[0], str):
label, default = default
if isinstance(default, tuple) and len(default) == 2 and isinstance(default[1], (tuple, list, types.GeneratorType)):
default_value, default = default
if isinstance(default, control):
if label:
default._opts['label'] = label
return default
elif isinstance(default, str):
return input_box(default, label=label, type=str)
elif isinstance(default, bool):
return checkbox(default, label=label)
elif isinstance(default, list):
return selector(default, default=default_value, label=label, buttons=len(default) <= 5)
elif isinstance(default, types.GeneratorType):
return slider(list_of_first_n(default, 10000), default=default_value, label=label)
elif isinstance(default, Color):
return color_selector(default=default, label=label)
elif isinstance(default, tuple):
if len(default) == 2:
return slider(default[0], default[1], default=default_value, label=label)
elif len(default) == 3:
return slider(default[0], default[1], default[2], default=default_value, label=label)
else:
return slider(list(default), default=default_value, label=label)
elif is_Matrix(default):
return input_grid(default.nrows(), default.ncols(), default=default.list(), to_value=default.parent(), label=label)
else:
return input_box(default, label=label)
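# A minimal sketch of the automatic control rules implemented above; each call
# shows which control a given default is mapped to (assuming a Sage session
# with this module loaded):
#
#     automatic_control(True)                  # checkbox
#     automatic_control((0, 10))               # continuous slider from 0 to 10
#     automatic_control(('mass', (0, 10)))     # the same slider, labeled 'mass'
#     automatic_control(['a', 'b', 'c'])       # selector rendered as buttons
#     automatic_control('hello')               # input_box of type str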
def interact_control(arg, value):
if isinstance(value, control):
if value._opts['label'] is None:
value._opts['label'] = arg
c = value
else:
c = automatic_control(value)
if c._opts['label'] is None:
c._opts['label'] = arg
c._opts['var'] = arg
return c
def sage_eval(x, locals=None):
x = str(x).strip()
if not x:  # blank input (whitespace-only before the strip) evaluates to None
return None
from sage.all import sage_eval
return sage_eval(x, locals=locals)
class ParseValue:
def __init__(self, type):
self._type = type
def _eval(self, value):
return sage_eval(value, locals=None if salvus is None else salvus.namespace)
def __call__(self, value):
from sage.all import Color
if self._type is None:
return self._eval(value)
elif self._type is str:
return str(value)
elif self._type is Color:
try:
return Color(value)
except ValueError:
try:
return Color("#"+value)
except ValueError:
raise TypeError("invalid color '%s'"%value)
else:
return self._type(self._eval(value))
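# A minimal sketch of how ParseValue coerces the raw strings sent back from an
# input box (assuming a live salvus session, since salvus.namespace is used
# for the evaluation):
#
#     ParseValue(None)('2 + 3')   # sage_eval'd -> 5
#     ParseValue(str)('2 + 3')    # kept verbatim -> '2 + 3'
#     ParseValue(int)('7')        # evaluated, then coerced -> 7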
def input_box(default=None, label=None, type=None, nrows=1, width=None, readonly=False, submit_button=None):
"""
An input box interactive control for use with the :func:`interact` command.
INPUT:
- default -- default value
- label -- label text
- type -- the type that the input is coerced to (from string)
- nrows -- (default: 1) the number of rows of the box
- width -- width; how wide the box is
- readonly -- is it read-only?
- submit_button -- defaults to true if nrows > 1 and false otherwise.
"""
return control(
control_type = 'input-box',
opts = locals(),
repr = "Input box",
convert_from_client = ParseValue(type)
)
def checkbox(default=True, label=None, readonly=False):
"""
A checkbox interactive control for use with the :func:`interact` command.
"""
return control(
control_type = 'checkbox',
opts = locals(),
repr = "Checkbox"
)
def color_selector(default='blue', label=None, readonly=False, widget=None, hide_box=False):
"""
A color selector.
SALVUS only: the widget option is ignored -- SALVUS only provides
bootstrap-colorpicker.
EXAMPLES::
@interact
def f(c=color_selector()):
print c
"""
from sage.all import Color
default = Color(default).html_color()
return control(
control_type = 'color-selector',
opts = locals(),
repr = "Color selector",
convert_from_client = lambda x : Color(str(x)),
convert_to_client = lambda x : Color(x).html_color()
)
def text_control(default='', label=None, classes=None):
"""
A read-only control that displays arbitrary HTML amongst the other
interact controls. This is very powerful, since it can display
any HTML.
INPUT:
- ``default`` -- actual HTML to display
- ``label`` -- string or None
- ``classes`` -- space separated string of CSS classes
EXAMPLES::
We output the factorization of a number in a text_control::
@interact
def f(n=2013, fact=text_control("")):
interact.fact = factor(n)
We use a CSS class to make the text_control look like a button::
@interact
def f(n=text_control("foo <b>bar</b>", classes='btn')):
pass
We animate a picture into view::
@interact
def f(size=[10,15,..,30], speed=[1,2,3,4]):
for k in range(size):
interact.g = text_control("<img src='http://sagemath.org/pix/sage_logo_new.png' width=%s>"%(20*k))
sleep(speed/50.0)
"""
return control(
control_type = 'text',
opts = locals(),
repr = "Text %r"%(default)
)
def button(default=None, label=None, classes=None, width=None, icon=None):
"""
Create a button. [SALVUS only]
You can tell that pressing this button triggered the interact
evaluation because interact.changed() will include the variable
name tied to the button.
INPUT:
- ``default`` -- value variable is set to
- ``label`` -- string (default: None)
- ``classes`` -- string or None; if given, a space separated
list of CSS classes, e.g., Bootstrap CSS classes such as:
btn-primary, btn-info, btn-success, btn-warning, btn-danger,
btn-link, btn-large, btn-small, btn-mini.
See http://twitter.github.com/bootstrap/base-css.html#buttons
If ``classes`` is a single string, those classes are applied to this button.
- ``width`` - an integer or string (default: None); if given,
all buttons are this width. If an integer, the default units
are 'ex'. A string that specifies any valid HTML units (e.g., '100px', '3em')
is also allowed [SALVUS only].
- ``icon`` -- None or string name of any icon listed at the font
awesome website (http://fortawesome.github.com/Font-Awesome/), e.g., 'fa-repeat'
EXAMPLES::
@interact
def f(hi=button('Hello', label='', classes="btn-primary btn-large"),
by=button("By")):
if 'hi' in interact.changed():
print "Hello to you, good sir."
if 'by' in interact.changed():
print "See you."
Some buttons with icons::
@interact
def f(n=button('repeat', icon='fa-repeat'),
m=button('see?', icon="fa-eye", classes="btn-large")):
print interact.changed()
"""
return control(
control_type = "button",
opts = locals(),
repr = "Button",
convert_from_client = lambda x : default,
convert_to_client = lambda x : str(x)
)
class Slider:
def __init__(self, start, stop, step_size, max_steps):
if isinstance(start, (list, tuple)):
self.vals = start
else:
if step_size is None:
if stop is None:
step_size = start/float(max_steps)
else:
step_size = (stop-start)/float(max_steps)
from sage.all import srange # sage range is much better/more flexible.
self.vals = srange(start, stop, step_size, include_endpoint=True)
# Now check to see if any of the above constructed a list of
# values that exceeds max_steps -- if so, linearly interpolate:
if len(self.vals) > max_steps:
n = len(self.vals)//max_steps
self.vals = [self.vals[n*i] for i in range(len(self.vals)//n)]
def to_client(self, val):
if val is None:
return 0
if isinstance(val, (list, tuple)):
return [self.to_client(v) for v in val]
else:
# Find index into self.vals of closest match.
try:
return self.vals.index(val) # exact match
except ValueError:
pass
z = [(abs(val-x),i) for i, x in enumerate(self.vals)]
z.sort()
return z[0][1]
def from_client(self, val):
if val is None:
return self.vals[0]
# val can be a n-tuple or an integer
if isinstance(val, (list, tuple)):
return tuple([self.vals[v] for v in val])
else:
return self.vals[int(val)]
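# A minimal sketch of the Slider value/index translation: the client only ever
# sees *indices* into Slider.vals (assuming this module is loaded in a Sage
# session):
#
#     s = Slider(0, 10, 2, 500)   # vals = [0, 2, 4, 6, 8, 10]
#     s.to_client(6)              # -> 3 (index of the value 6)
#     s.to_client(5)              # -> 2 (nearest value; ties go to the lower index)
#     s.from_client(3)            # -> 6
#     s.from_client((1, 4))       # -> (2, 8)  (range sliders send index pairs)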
class InputGrid:
def __init__(self, nrows, ncols, default, to_value):
self.nrows = nrows
self.ncols = ncols
self.to_value = to_value
self.value = copy.deepcopy(self.adapt(default))
def adapt(self, x):
if not isinstance(x, list):
return [[x for _ in range(self.ncols)] for _ in range(self.nrows)]
elif not all(isinstance(elt, list) for elt in x):
return [[x[i * self.ncols + j] for j in xrange(self.ncols)] for i in xrange(self.nrows)]
else:
return x
def from_client(self, x):
if len(x) == 0:
self.value = []
elif isinstance(x[0], list):
self.value = [[sage_eval(t) for t in z] for z in x]
else:
# x is a list of (unicode) strings -- we sage eval them all at once (instead of individually).
s = '[' + ','.join([str(t) for t in x]) + ']'
v = sage_eval(s)
self.value = [v[n:n+self.ncols] for n in range(0, self.nrows*self.ncols, self.ncols)]
return self.to_value(self.value) if self.to_value is not None else self.value
def to_client(self, x=None):
if x is None:
v = self.value
else:
v = self.adapt(x)
self.value = v # save value in our local cache
return [[repr(x) for x in y] for y in v]
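# A minimal sketch of the InputGrid round trip: the client sends back a grid
# of strings, which is sage_eval'd and then passed through to_value (assuming
# a Sage session, so matrix is available):
#
#     g = InputGrid(2, 2, default=[[1, 2], [3, 4]], to_value=matrix)
#     g.to_client()                                # -> [['1', '2'], ['3', '4']]
#     g.from_client([['1', '2'], ['3', '4/5']])    # -> 2x2 matrix over QQ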
def input_grid(nrows, ncols, default=0, label=None, to_value=None, width=5):
r"""
A grid of input boxes, for use with the :func:`interact` command.
INPUT:
- ``nrows`` - an integer
- ``ncols`` - an integer
- ``default`` - an object; the default put in this input box
- ``label`` - a string; the label rendered to the left of the box.
- ``to_value`` - a function; the grid output (list of rows) is
sent through this function. This may reformat the data or
coerce the type.
- ``width`` - an integer; size of each input box in characters
EXAMPLES:
Solving a system::
@interact
def _(m = input_grid(2,2, default = [[1,7],[3,4]],
label=r'$M\qquad =$', to_value=matrix, width=8),
v = input_grid(2,1, default=[1,2],
label=r'$v\qquad =$', to_value=matrix)):
try:
x = m.solve_right(v)
html('$$%s %s = %s$$'%(latex(m), latex(x), latex(v)))
except:
html('There is no solution to $$%s x=%s$$'%(latex(m), latex(v)))
Squaring an editable and randomizable matrix::
@interact
def f(reset = button('Randomize', classes="btn-primary", icon="fa-th"),
square = button("Square", icon="fa-external-link"),
m = input_grid(4,4,default=0, width=5, label="m =", to_value=matrix)):
if 'reset' in interact.changed():
print "randomize"
interact.m = [[random() for _ in range(4)] for _ in range(4)]
if 'square' in interact.changed():
salvus.tex(m^2)
"""
ig = InputGrid(nrows, ncols, default, to_value)
return control(
control_type = 'input-grid',
opts = {'default' : ig.to_client(),
'label' : label,
'width' : width,
'nrows' : nrows,
'ncols' : ncols},
repr = "Input Grid",
convert_from_client = ig.from_client,
convert_to_client = ig.to_client
)
def slider(start, stop=None, step=None, default=None, label=None,
display_value=True, max_steps=500, step_size=None, range=False,
width=None, animate=True):
"""
An interactive slider control for use with :func:`interact`.
There are several ways to call the slider function, but they all
take several named arguments:
- ``default`` - an object (default: None); the slider starts at the value
closest to this. If range=True, default can also be a 2-tuple (low, high).
- ``label`` -- string
- ``display_value`` -- bool (default: True); whether to display the
current value to the right of the slider.
- ``max_steps`` -- integer, default: 500; this is the maximum
number of values that the slider can take on. Do not make
it too large, since it could overwhelm the client. [SALVUS only]
- ``range`` -- bool (default: False); instead, you can select
a range of values (lower, higher), which are returned as a
2-tuple. You may also set the value of the slider or
specify a default value using a 2-tuple.
- ``width`` -- how wide the slider appears to the user [SALVUS only]
- ``animate`` -- True (default), False,"fast", "slow", or the
duration of the animation in milliseconds. [SALVUS only]
You may call the slider function as follows:
- slider([list of objects], ...) -- slider taking values the objects in the list
- slider([start,] stop[, step]) -- slider over numbers from start
to stop. When step is given it specifies the increment (or
decrement); if it is not given, then the number of steps equals
the width of the control in pixels. In all cases, the number of
values will be shrunk to be at most the pixel_width, since it is
not possible to select more than this many values using a slider.
EXAMPLES::
Use one slider to modify the animation speed of another::
@interact
def f(speed=(50,100,..,2000), x=slider([1..50], animate=1000)):
if 'speed' in interact.changed():
print "change x to have speed", speed
del interact.x
interact.x = slider([1..50], default=interact.x, animate=speed)
return
"""
if step_size is not None: # for compat with sage
step = step_size
slider = Slider(start, stop, step, max_steps)
vals = [str(x) for x in slider.vals] # for display by the client
if range and default is None:
default = [0, len(vals)-1]
return control(
control_type = 'range-slider' if range else 'slider',
opts = {'default' : slider.to_client(default),
'label' : label,
'animate' : animate,
'vals' : vals,
'display_value' : display_value,
'width' : width},
repr = "Slider",
convert_from_client = slider.from_client,
convert_to_client = slider.to_client
)
def range_slider(*args, **kwds):
"""
range_slider is the same as :func:`slider`, except with range=True.
EXAMPLES:
A range slider with a constraint::
@interact
def _(t = range_slider([1..1000], default=(100,200), label=r'Choose a range for $\alpha$')):
print t
"""
kwds['range'] = True
return slider(*args, **kwds)
def selector(values, label=None, default=None,
nrows=None, ncols=None, width=None, buttons=False,
button_classes=None):
"""
A drop down menu or a button bar for use in conjunction with
the :func:`interact` command. We use the same command to
create either a drop down menu or selector bar of buttons,
since conceptually the two controls do exactly the same thing
- they only look different. If either ``nrows`` or ``ncols``
is given, then you get buttons instead of a drop down menu.
INPUT:
- ``values`` - either (1) a list [val0, val1, val2, ...] or (2)
a list of pairs [(val0, lbl0), (val1,lbl1), ...] in which case
all labels must be given -- use None to auto-compute a given label.
- ``label`` - a string (default: None); if given, this label
is placed to the left of the entire button group
- ``default`` - an object (default: first); default value in values list
- ``nrows`` - an integer (default: None); if given determines
the number of rows of buttons; if given, buttons=True
- ``ncols`` - an integer (default: None); if given determines
the number of columns of buttons; if given, buttons=True
- ``width`` - an integer or string (default: None); if given,
all buttons are this width. If an integer, the default units
are 'ex'. A string that specifies any valid HTML units (e.g., '100px', '3em')
is also allowed [SALVUS only].
- ``buttons`` - a bool (default: False, except as noted
above); if True, use buttons
- ``button_classes`` - [SALVUS only] None, a string, or list of strings
of the same length as values, whose entries are a whitespace-separated
string of CSS classes, e.g., Bootstrap CSS classes such as:
btn-primary, btn-info, btn-success, btn-warning, btn-danger,
btn-link, btn-large, btn-small, btn-mini.
See http://twitter.github.com/bootstrap/base-css.html#buttons
If button_classes is a single string, that class is applied to all buttons.
"""
if (len(values) > 0 and isinstance(values[0], tuple) and len(values[0]) == 2):
vals = [z[0] for z in values]
lbls = [str(z[1]) if z[1] is not None else None for z in values]
else:
vals = values
lbls = [None] * len(vals)
for i in range(len(vals)):
if lbls[i] is None:
v = vals[i]
lbls[i] = v if isinstance(v, str) else str(v)
if default is None:
default = 0
else:
try:
default = vals.index(default)
except ValueError:  # list.index raises ValueError when default is not in vals
default = 0
opts = dict(locals())
for k in ['vals', 'values', 'i', 'v', 'z']:
if k in opts:
del opts[k] # these could have a big jsonable repr
opts['lbls'] = lbls
return control(
control_type = 'selector',
opts = opts,
repr = "Selector labeled %r with values %s"%(label, values),
convert_from_client = lambda n : vals[int(n)],
convert_to_client = lambda x : vals.index(x)
)
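# A minimal sketch of the two accepted forms of ``values`` for selector()
# (assuming a Salvus session).  Internally only the *index* of the chosen
# entry travels to and from the client; convert_from_client maps it back to
# the original value:
#
#     selector([1, 2, 7])                               # drop down showing 1, 2, 7
#     selector([(1, 'one'), (2, 'two')], buttons=True)  # buttons labeled one / two
#     selector(['a', 'b'], default='b')                 # 'b' preselected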
interact_functions = {}
interact_controls = ['button', 'checkbox', 'color_selector', 'input_box',
'range_slider', 'selector', 'slider', 'text_control',
'input_grid']
for f in ['interact'] + interact_controls:
interact_functions[f] = globals()[f]
# A little magic so that "interact.controls.[tab]" shows all the controls.
class Controls:
pass
Interact.controls = Controls()
for f in interact_controls:
interact.controls.__dict__[f] = interact_functions[f]
##########################################################################################
# Cell object -- programmatically control the current cell.
##########################################################################################
class Cell(object):
def id(self):
"""
Return the UUID of the cell in which this function is called.
"""
return salvus._id
def hide(self, component='input'):
"""
Hide the 'input' or 'output' component of a cell.
"""
salvus.hide(component)
def show(self, component='input'):
"""
Show the 'input' or 'output' component of a cell.
"""
salvus.show(component)
def hideall(self):
"""
Hide the input and output fields of the cell in which this code executes.
"""
salvus.hide('input')
salvus.hide('output')
#def input(self, val=None):
# """
# Get or set the value of the input component of the cell in
# which this code executes.
# """
# salvus.javascript("cell.set_input(obj)", obj=val)
#
#def output(self, val=None):
# """
# Get or set the value of the output component of the cell in
# which this code executes.
# """
# salvus.javascript("cell.set_output(obj)", obj=val)
# return salvus.output(val, self._id)
cell = Cell()
##########################################################################################
# Cell decorators -- aka "percent modes"
##########################################################################################
import sage.misc.html
try:
_html = sage.misc.html.HTML()
except:
_html = sage.misc.html.HTMLFragmentFactory
class HTML:
"""
Cell mode that renders everything after %html as HTML then hides
the input (unless you pass in hide=False).
EXAMPLES::
---
%html
<h1>A Title</h1>
<h2>Subtitle</h2>
---
%html(hide=False)
<h1>A Title</h1>
<h2>Subtitle</h2>
---
%html("<h1>A title</h1>", hide=False)
---
%html(hide=False) <h1>Title</h1>
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return HTML(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
if hide:
salvus.hide('input')
salvus.html(s)
def table(self):
raise NotImplementedError, "html.table not implemented in SageMathCloud yet"
html = HTML()
html.iframe = _html.iframe # written in a way that works fine
def coffeescript(s=None, once=False):
"""
Execute code using CoffeeScript.
For example:
%coffeescript console.log 'hi'
or
coffeescript("console.log 'hi'")
You may either pass in a string or use this as a cell decorator,
i.e., put %coffeescript at the top of a cell.
If you set once=False, the code will be executed every time the output of the cell is rendered, e.g.,
on load, like with %auto::
coffeescript('console.log("hi")', once=False)
or
%coffeescript(once=False)
console.log("hi")
EXTRA FUNCTIONALITY:
When executing code, a function called print is defined, and objects cell and worksheet.::
print(1,2,'foo','bar') -- displays the inputs in the output cell
cell -- has attributes cell.output (the html output box) and cell.cell_id
worksheet -- has attributes project_page and editor, and methods interrupt, kill, and
execute_code: (opts) =>
opts = defaults opts,
code : required
data : undefined
preparse : true
cb : undefined
OPTIMIZATION: When used alone as a cell decorator in a Sage worksheet
with once=False (the default), rendering is done entirely client side,
which is much faster, not requiring a round-trip to the server.
"""
if s is None:
return lambda s : salvus.javascript(s, once=once, coffeescript=True)
else:
return salvus.javascript(s, coffeescript=True, once=once)
def javascript(s=None, once=False):
"""
Execute code using JavaScript.
For example:
%javascript console.log('hi')
or
javascript("console.log('hi')")
You may either pass in a string or use this as a cell decorator,
i.e., put %javascript at the top of a cell.
If once=False (the default), the code will be executed every time the output of the
cell is rendered, e.g., on load, like with %auto::
javascript('.. some code ', once=False)
or
%javascript(once=False)
... some code
WARNING: If once=True, then this code is likely to get executed *before* the rest
of the output for this cell has been rendered by the client.
javascript('console.log("HI")', once=False)
EXTRA FUNCTIONALITY:
When executing code, a function called print is defined, and objects cell and worksheet.::
print(1,2,'foo','bar') -- displays the inputs in the output cell
cell -- has attributes cell.output (the html output box) and cell.cell_id
worksheet -- has attributes project_page and editor, and methods interrupt, kill, and
execute_code: (opts) =>
opts = defaults opts,
code : required
data : undefined
preparse : true
cb : undefined
This example illustrates using worksheet.execute_code::
%coffeescript
for i in [500..505]
worksheet.execute_code
code : "i=salvus.data['i']; i, factor(i)"
data : {i:i}
cb : (mesg) ->
if mesg.stdout then print(mesg.stdout)
if mesg.stderr then print(mesg.stderr)
OPTIMIZATION: When used alone as a cell decorator in a Sage worksheet
with once=False (the default), rendering is done entirely client side,
which is much faster, not requiring a round-trip to the server.
"""
if s is None:
return lambda s : salvus.javascript(s, once=once)
else:
return salvus.javascript(s, once=once)
javascript_exec_doc = r"""
To send code from Javascript back to the Python process to
be executed use the worksheet.execute_code function::
%javascript worksheet.execute_code(string_to_execute)
You may also use a more general call format of the form::
%javascript
worksheet.execute_code({code:string_to_execute, data:jsonable_object,
preparse:true or false, cb:function});
The data object is available when the string_to_execute is being
evaluated as salvus.data. For example, if you execute this code
in a cell::
javascript('''
worksheet.execute_code({code:"a = salvus.data['b']/2; print a", data:{b:5},
preparse:false, cb:function(mesg) { console.log(mesg)} });
''')
then the Python variable a is set to 2, and the Javascript console log will display::
Object {done: false, event: "output", id: "..."}
Object {stdout: "2\n", done: true, event: "output", id: "..."}
You can also send an interrupt signal to the Python process from
Javascript by calling worksheet.interrupt(), and kill the process
with worksheet.kill(). For example, here the a=4 never
happens (but a=2 does)::
%javascript
worksheet.execute_code({code:'a=2; sleep(100); a=4;',
cb:function(mesg) { worksheet.interrupt(); console.log(mesg)}})
or using CoffeeScript (a Javascript preparser)::
%coffeescript
worksheet.execute_code
code : 'a=2; sleep(100); a=4;'
cb : (mesg) ->
worksheet.interrupt()
console.log(mesg)
The Javascript code is evaluated with numerous standard Javascript libraries available,
including jQuery, Twitter Bootstrap, jQueryUI, etc.
"""
for s in [coffeescript, javascript]:
s.__doc__ += javascript_exec_doc
def latex0(s=None, **kwds):
"""
Create and display an arbitrary LaTeX document as a png image in the Salvus Notebook.
In addition to directly calling latex.eval, you may put %latex (or %latex.eval(density=75, ...etc...))
at the top of a cell, which will typeset everything else in the cell.
"""
if s is None:
return lambda t : latex0(t, **kwds)
import os
if 'filename' not in kwds:
import tempfile
delete_file = True
kwds['filename'] = tempfile.mkstemp(suffix=".png")[1]
else:
delete_file = False
if 'locals' not in kwds:
kwds['locals'] = salvus.namespace
if 'globals' not in kwds:
kwds['globals'] = salvus.namespace
sage.misc.latex.Latex.eval(sage.misc.latex.latex, s, **kwds)
salvus.file(kwds['filename'], once=False)
if delete_file:
os.unlink(kwds['filename'])
return ''
latex0.__doc__ += sage.misc.latex.Latex.eval.__doc__
class Time:
"""
Time execution of code exactly once in Salvus by:
- putting %time at the top of a cell to time execution of the entire cell
- putting %time at the beginning of a line to time execution of just that line
- writing time('some code') to time execution of the contents of the string.
If you want to time repeated execution of code for benchmarking purposes, use
the timeit command instead.
"""
def __init__(self, start=False):
if start:
from sage.all import walltime, cputime
self._start_walltime = walltime()
self._start_cputime = cputime()
def before(self, code):
return Time(start=True)
def after(self, code):
from sage.all import walltime, cputime
print "CPU time: %.2f s, Wall time: %.2f s"%( cputime(self._start_cputime), walltime(self._start_walltime))
self._start_cputime = self._start_walltime = None
def __call__(self, code):
from sage.all import walltime, cputime
not_as_decorator = self._start_cputime is None
if not_as_decorator:
self.before(code)
salvus.execute(code)
if not_as_decorator:
self.after(code)
time = Time()
def file(path):
"""
Block decorator to write to a file. Use as follows:
%file('filename') put this line in the file
or
%file('filename')
everything in the rest of the
cell goes into the file with given name.
As with all block decorators in Salvus, the arguments to file can
be arbitrary expressions. For example,
a = 'file'; b = ['name', 'txt']
%file(a+b[0]+'.'+b[1]) rest of line goes in 'filename.txt'
"""
return lambda content: open(path,'w').write(content)
def timeit(*args, **kwds):
"""
Time execution of a command or block of commands.
This command has been enhanced for Salvus so you may use it as
a block decorator as well, e.g.,
%timeit 2+3
and
%timeit(number=10, preparse=False) 2^3
%timeit(number=10, seconds=True) 2^3
and
%timeit(preparse=False)
[rest of the cell]
Here is the original docstring for timeit:
"""
def go(code):
print sage.misc.sage_timeit.sage_timeit(code, globals_dict=salvus.namespace, **kwds)
if len(args) == 0:
return lambda code : go(code)
else:
go(*args)
# TODO: these need to also give the argspec
timeit.__doc__ += sage.misc.sage_timeit.sage_timeit.__doc__
class Capture:
"""
Capture or ignore the output from evaluating the given code. (SALVUS only).
Use capture as a block decorator by placing either %capture or
%capture(optional args) at the beginning of a cell or at the
beginning of a line. If you use just plain %capture then stdout
and stderr are completely ignored. If you use %capture(args)
you can redirect or echo stdout and stderr to variables or
files. For example if you start a cell with this line::
%capture(stdout='output', stderr=open('error','w'), append=True, echo=True)
then stdout is appended (because append=True) to the global
variable output, stderr is written to the file 'error', and the
output is still displayed in the output portion of the cell (echo=True).
INPUT:
- stdout -- string (or object with write method) to send stdout output to (string=name of variable)
- stderr -- string (or object with write method) to send stderr output to (string=name of variable)
- append -- (default: False) if stdout/stderr are a string, append to corresponding variable
- echo -- (default: False) if True, also echo stdout/stderr to the output cell.
"""
def __init__(self, stdout, stderr, append, echo):
self.v = (stdout, stderr, append, echo)
def before(self, code):
(stdout, stderr, append, echo) = self.v
self._orig_stdout_f = orig_stdout_f = sys.stdout._f
if stdout is not None:
if hasattr(stdout, 'write'):
def write_stdout(buf):
stdout.write(buf)
elif isinstance(stdout, str):
if (stdout not in salvus.namespace) or not append:
salvus.namespace[stdout] = ''
if not isinstance(salvus.namespace[stdout], str):
salvus.namespace[stdout] = str(salvus.namespace[stdout])
def write_stdout(buf):
salvus.namespace[stdout] += buf
else:
raise TypeError, "stdout must be None, a string, or have a write method"
def f(buf, done):
write_stdout(buf)
if echo:
orig_stdout_f(buf, done)
elif done:
orig_stdout_f('', done)
sys.stdout._f = f
elif not echo:
def f(buf,done):
if done:
orig_stdout_f('',done)
sys.stdout._f = f
self._orig_stderr_f = orig_stderr_f = sys.stderr._f
if stderr is not None:
if hasattr(stderr, 'write'):
def write_stderr(buf):
stderr.write(buf)
elif isinstance(stderr, str):
if (stderr not in salvus.namespace) or not append:
salvus.namespace[stderr] = ''
if not isinstance(salvus.namespace[stderr], str):
salvus.namespace[stderr] = str(salvus.namespace[stderr])
def write_stderr(buf):
salvus.namespace[stderr] += buf
else:
raise TypeError, "stderr must be None, a string, or have a write method"
def f(buf, done):
write_stderr(buf)
if echo:
orig_stderr_f(buf, done)
elif done:
orig_stderr_f('', done)
sys.stderr._f = f
elif not echo:
def f(buf,done):
if done:
orig_stderr_f('',done)
sys.stderr._f = f
return self
def __call__(self, code=None, stdout=None, stderr=None, append=False, echo=False):
if code is None:
return Capture(stdout=stdout, stderr=stderr, append=append, echo=echo)
salvus.execute(code)
def after(self, code):
sys.stdout._f = self._orig_stdout_f
sys.stderr._f = self._orig_stderr_f
capture = Capture(stdout=None, stderr=None, append=False, echo=False)
def cython(code=None, **kwds):
"""
Block decorator to easily include Cython code in the Salvus notebook.
Just put %cython at the top of a cell, and the rest is compiled as Cython code.
You can pass options to cython by typing "%cython(... var=value...)" instead.
This is a wrapper around Sage's cython function, whose docstring is:
"""
if code is None:
return lambda code: cython(code, **kwds)
import sage.misc.misc
path = sage.misc.misc.tmp_dir()
filename = os.path.join(path, 'a.pyx')
open(filename, 'w').write(code)
if 'annotate' not in kwds:
kwds['annotate'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **kwds)
try:
sys.path.insert(0,path)
module = __import__(modname)
finally:
del sys.path[0]
import inspect
for name, value in inspect.getmembers(module):
if not name.startswith('_'):
salvus.namespace[name] = value
files = os.listdir(path)
html_filename = None
for n in files:
base, ext = os.path.splitext(n)
if ext.startswith('.html') and '_pyx_' in base:
html_filename = os.path.join(path, n)
if html_filename is not None:
html_url = salvus.file(html_filename, raw=True, show=False)
salvus.html("<a href='%s' target='_new' class='btn btn-small' style='margin-top: 1ex'>Auto-generated code... <i class='fa fa-external-link'></i></a>"%html_url)
cython.__doc__ += sage.misc.cython.cython.__doc__
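# A minimal sketch of a %cython cell (assuming a Salvus worksheet): the
# compiled names are injected into the worksheet namespace and a link to the
# annotated HTML is displayed.
#
#     %cython
#     cpdef long fib(long n):
#         cdef long a = 0, b = 1, i
#         for i in range(n):
#             a, b = b, a + b
#         return a
#
# and then, in another cell:  fib(50)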
class script:
r"""
Block decorator to run an arbitrary shell command with input from a
cell in Salvus.
Put %script('shell command line') or %script(['command', 'arg1',
'arg2', ...]) by itself on a line in a cell, and the command line
is run with stdin the rest of the contents of the cell. You can
also use script in single line mode, e.g.,::
%script('gp -q') factor(2^97 - 1)
or
%script(['gp', '-q']) factor(2^97 - 1)
will launch a gp session, feed 'factor(2^97-1)' into stdin, and
display the resulting factorization.
NOTE: the result is stored in the attribute "stdout", so you can do::
s = script('gp -q')
%s factor(2^97-1)
s.stdout
'\n[11447 1]\n\n[13842607235828485645766393 1]\n\n'
and s.stdout will now be the output string.
You may also specify the shell environment with the env keyword.
"""
def __init__(self, args, env=None):
self._args = args
self._env = env
def __call__(self, code=''):
import subprocess
try:
s = None
s = subprocess.Popen(self._args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=isinstance(self._args, str),
env=self._env)
s.stdin.write(code); s.stdin.close()
finally:
if s is None:
return
try:
self.stdout = s.stdout.read()
sys.stdout.write(self.stdout)
finally:
try:
os.system("pkill -TERM -P %s"%s.pid)
except OSError:
pass
try:
os.kill(s.pid, 9)
except OSError:
pass
def python(code):
"""
Block decorator to run code in pure Python mode, without it being
preparsed by the Sage preparser. Otherwise, nothing changes.
To use this, put %python by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
disable preparsing just for that line.
"""
salvus.execute(code, preparse=False)
def python3(code):
"""
Block decorator to run code in a pure Python3 mode session.
To use this, put %python3 by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using python3.
You can combine %python3 with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p3')
%python3
x = set([1,2,3])
print(x)
Afterwards, p3 contains the output '{1, 2, 3}' and the variable x
in the controlling Sage session is in no way impacted.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute python3 -E')(code)
def perl(code):
"""
Block decorator to run code in a Perl session.
To use this, put %perl by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using perl.
EXAMPLE:
A perl cell::
%perl
$apple_count = 5;
$count_report = "There are $apple_count apples.";
print "The report is: $count_report\n";
Or use %perl on one line::
%perl $apple_count = 5; $count_report = "There are $apple_count apples."; print "The report is: $count_report\n";
You can combine %perl with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p')
%perl print "hi"
Afterwards, p contains 'hi'.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute perl')(code)
def ruby(code):
"""
Block decorator to run code in a Ruby session.
To use this, put %ruby by itself in a cell so that it applies to
the rest of the cell, or put it at the beginning of a line to
run just that line using ruby.
EXAMPLE:
A ruby cell::
%ruby
lang = "ruby"
print "Hello from #{lang}!"
Or use %ruby on one line::
%ruby lang = "ruby"; print "Hello from #{lang}!"
You can combine %ruby with capture, if you would like to capture
the output to a variable. For example::
%capture(stdout='p')
%ruby lang = "ruby"; print "Hello from #{lang}!"
Afterwards, p contains 'Hello from ruby!'.
NOTE: No state is preserved between calls. Each call is a separate process.
"""
script('sage-native-execute ruby')(code)
def fortran(x, library_paths=[], libraries=[], verbose=False):
"""
Compile Fortran code and make it available to use.
INPUT:
- x -- a string containing code
Use this as a decorator. For example, put this in a cell and evaluate it::
%fortran
C FILE: FIB1.F
SUBROUTINE FIB(A,N)
C
C CALCULATE FIRST N FIBONACCI NUMBERS
C
INTEGER N
REAL*8 A(N)
DO I=1,N
IF (I.EQ.1) THEN
A(I) = 0.0D0
ELSEIF (I.EQ.2) THEN
A(I) = 1.0D0
ELSE
A(I) = A(I-1) + A(I-2)
ENDIF
ENDDO
END
C END FILE FIB1.F
In the next cell, evaluate this::
import numpy
n = numpy.array(range(10),dtype=float)
fib(n,int(10))
n
This will produce this output: array([ 0., 1., 1., 2., 3., 5., 8., 13., 21., 34.])
"""
import __builtin__
from sage.misc.temporary_file import tmp_dir
if len(x.splitlines()) == 1 and os.path.exists(x):
filename = x
x = open(x).read()
if filename.lower().endswith('.f90'):
x = '!f90\n' + x
from numpy import f2py
from random import randint
# Create everything in a temporary directory
mytmpdir = tmp_dir()
try:
old_cwd = os.getcwd()
os.chdir(mytmpdir)
old_import_path = os.sys.path
os.sys.path.append(mytmpdir)
name = "fortran_module_%s"%randint(0,2**64) # Python module name
# if the first line has !f90 as a comment, gfortran will
# treat it as Fortran 90 code
if x.startswith('!f90'):
fortran_file = name + '.f90'
else:
fortran_file = name + '.f'
s_lib_path = ""
s_lib = ""
for s in library_paths:
s_lib_path = s_lib_path + "-L%s "%s
for s in libraries:
s_lib = s_lib + "-l%s "%s
log = name + ".log"
extra_args = '--quiet --f77exec=sage-inline-fortran --f90exec=sage-inline-fortran %s %s >"%s" 2>&1'%(
s_lib_path, s_lib, log)
f2py.compile(x, name, extra_args = extra_args, source_fn=fortran_file)
log_string = open(log).read()
# f2py.compile() doesn't raise any exception if it fails.
# So we manually check whether the compiled file exists.
# NOTE: the .so extension is used except on Cygwin,
# even on OS X where .dylib might be expected.
soname = name
uname = os.uname()[0].lower()
if uname[:6] == "cygwin":
soname += '.dll'
else:
soname += '.so'
if not os.path.isfile(soname):
raise RuntimeError("failed to compile Fortran code:\n" + log_string)
if verbose:
print log_string
m = __builtin__.__import__(name)
finally:
os.sys.path = old_import_path
os.chdir(old_cwd)
try:
import shutil
shutil.rmtree(mytmpdir)
except OSError:
# This can fail for example over NFS
pass
for k, x in m.__dict__.iteritems():
if k[0] != '_':
salvus.namespace[k] = x
def sh(code):
"""
Run a bash script in Salvus.
EXAMPLES:
Use as a block decorator on a single line::
%sh pwd
and multiline
%sh
echo "hi"
pwd
ls -l
You can also just directly call it::
sh('pwd')
The output is printed. To capture it, use capture
%capture(stdout='output')
%sh pwd
After that, the variable output contains the current directory
"""
return script('/bin/bash')(code)
# Monkey patch the R interpreter interface to support graphics, when
# used as a decorator.
import sage.interfaces.r
def r_eval0(*args, **kwds):
return sage.interfaces.r.R.eval(sage.interfaces.r.r, *args, **kwds).strip('\n')
_r_plot_options = ''
def set_r_plot_options(width=7, height=7):
global _r_plot_options
_r_plot_options = ", width=%s, height=%s"%(width, height)
r_dev_on = False
def r_eval(code, *args, **kwds):
"""
Run a block of R code.
EXAMPLES::
sage: print r.eval("summary(c(1,2,3,111,2,3,2,3,2,5,4))") # outputs a string
Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 2.00 3.00 12.55 3.50 111.00
In the notebook, you can put %r at the top of a cell, or type "%default_mode r" into
a cell to set the whole worksheet to r mode.
NOTE: Any plots drawn using the plot command should "just work", without having
to mess with special devices, etc.
"""
# Only use special graphics support when using r as a cell decorator, since it has
# a 10ms penalty (factor of 10 slowdown) -- which doesn't matter for interactive work, but matters
# a lot if one had a loop with r.eval in it.
if sage.interfaces.r.r not in salvus.code_decorators:
return r_eval0(code, *args, **kwds)
global r_dev_on
if r_dev_on:
return r_eval0(code, *args, **kwds)
try:
r_dev_on = True
tmp = '/tmp/' + uuid() + '.svg'
r_eval0("svg(filename='%s'%s)"%(tmp, _r_plot_options))
s = r_eval0(code, *args, **kwds)
r_eval0('dev.off()')
return s
finally:
r_dev_on = False
if os.path.exists(tmp):
salvus.stdout('\n'); salvus.file(tmp, show=True); salvus.stdout('\n')
os.unlink(tmp)
sage.interfaces.r.r.eval = r_eval
sage.interfaces.r.r.set_plot_options = set_r_plot_options
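# A minimal sketch of the behaviour patched in above (assuming a Salvus
# worksheet): when %r decorates a cell, every plot drawn by the R code is
# routed through a temporary svg device and displayed inline.
#
#     set_r_plot_options(width=5, height=4)   # in a Sage cell; resizes future plots
#
#     %r
#     hist(rnorm(100))                        # in an %r cell; histogram shown inline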
def prun(code):
"""
Use %prun followed by a block of code to profile execution of that
code. This will display the resulting profile, along with a menu
to select how to sort the data.
EXAMPLES:
Profile computing a tricky integral (on a single line)::
%prun integrate(sin(x^2),x)
Profile a block of code::
%prun
E = EllipticCurve([1..5])
v = E.anlist(10^5)
r = E.rank()
"""
import cProfile, pstats
from sage.misc.all import tmp_filename
filename = tmp_filename()
cProfile.runctx(salvus.namespace['preparse'](code), salvus.namespace, locals(), filename)
@interact
def f(title = text_control('', "<h1>Salvus Profiler</h1>"),
sort=("First sort by", selector([('calls', 'number of calls to the function'),
('time', ' total time spent in the function'),
('cumulative', 'total time spent in this and all subfunctions (from invocation till exit)'),
('module', 'name of the module that contains the function'),
('name', 'name of the function')
], width="100%", default='time')),
strip_dirs=True):
try:
p = pstats.Stats(filename)
if strip_dirs:
p.strip_dirs()
p.sort_stats(sort)
p.print_stats()
except Exception, msg:
print msg
##############################################################
# The %fork cell decorator.
##############################################################
def _wait_in_thread(pid, callback, filename):
from sage.structure.sage_object import load
def wait():
try:
os.waitpid(pid,0)
callback(load(filename))
except Exception, msg:
callback(msg)
from threading import Thread
t = Thread(target=wait, args=tuple([]))
t.start()
def async(f, args, kwds, callback):
"""
Run f in a forked subprocess with given args and kwds, then call the
callback function when f terminates.
"""
from sage.misc.all import tmp_filename
filename = tmp_filename() + '.sobj'
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid:
# The parent master process
try:
_wait_in_thread(pid, callback, filename)
return pid
finally:
if os.path.exists(filename):
os.unlink(filename)
else:
# The child process
try:
result = f(*args, **kwds)
except Exception, msg:
result = str(msg)
from sage.structure.sage_object import save
save(result, filename)
os._exit(0)
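# A minimal sketch of calling async() directly (the %fork decorator below
# wraps it).  The callback receives whatever the forked function returned, or
# the error message as a string.  Hypothetical example, assuming a Sage
# session:
#
#     def slow_factor():
#         return factor(2**128 + 1)
#     def report(result):
#         print "child finished:", result
#     pid = async(slow_factor, tuple([]), {}, report)   # returns immediately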
class Fork(object):
"""
The %fork block decorator evaluates its code in a forked subprocess
that does not block the main process.
You may still use the @fork function decorator from Sage, as usual,
to run a function in a subprocess. Type "sage.all.fork?" to see
the help for the @fork decorator.
WARNING: This is highly experimental and possibly flaky. Use with
caution.
All (picklable) global variables that are set in the forked
subprocess are set in the parent when the forked subprocess
terminates. However, the forked subprocess has no other side
effects, except what it might do to file handles and the
filesystem.
To see currently running forked subprocesses, type
fork.children(), which returns a dictionary {pid:execute_uuid}.
To kill a given subprocess and stop the cell waiting for input,
type fork.kill(pid). This is currently the only way to stop code
running in %fork cells.
TODO/WARNING: The subprocesses spawned by fork are not killed
if the parent process is killed first!
NOTE: All pexpect interfaces are reset in the child process.
"""
def __init__(self):
self._children = {}
def children(self):
return dict(self._children)
def __call__(self, s):
if isinstance(s, types.FunctionType): # check for decorator usage
import sage.parallel.decorate
return sage.parallel.decorate.fork(s)
salvus._done = False
id = salvus._id
changed_vars = set([])
def change(var, val):
changed_vars.add(var)
def f():
# Run some commands to tell Sage that its
# pid has changed.
import sage.misc.misc
reload(sage.misc.misc)
# The pexpect interfaces (and objects defined in them) are
# not valid.
sage.interfaces.quit.invalidate_all()
salvus.namespace.on('change', None, change)
salvus.execute(s)
result = {}
from sage.structure.sage_object import dumps
for var in changed_vars:
try:
result[var] = dumps(salvus.namespace[var])
except:
result[var] = 'unable to pickle %s'%var
return result
from sage.structure.sage_object import loads
def g(s):
if isinstance(s, Exception):
sys.stderr.write(str(s))
sys.stderr.flush()
else:
for var, val in s.iteritems():
try:
salvus.namespace[var] = loads(val)
except:
print "unable to unpickle %s"%var
salvus._conn.send_json({'event':'output', 'id':id, 'done':True})
if pid in self._children:
del self._children[pid]
pid = async(f, tuple([]), {}, g)
print "Forked subprocess %s"%pid
self._children[pid] = id
def kill(self, pid):
if pid in self._children:
salvus._conn.send_json({'event':'output', 'id':self._children[pid], 'done':True})
os.kill(pid, 9)
del self._children[pid]
else:
raise ValueError, "Unknown pid = (%s)"%pid
fork = Fork()
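# A minimal sketch of %fork in a worksheet (see the Fork docstring above),
# assuming a Salvus session:
#
#     %fork
#     a = factor(2^997 - 1)    # runs in a child process; picklable globals
#                              # (here a) appear in the parent when it finishes
#
#     fork.children()          # in another cell: {pid: execute_uuid}
#     fork.kill(pid)           # stop a runaway %fork cell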
####################################################
# Display of 2d/3d graphics objects
####################################################
from sage.misc.all import tmp_filename
from sage.plot.animate import Animation
import matplotlib.figure
def show_animation(obj, delay=20, gif=False, **kwds):
if gif:
t = tmp_filename(ext='.gif')
obj.gif(delay, t, **kwds)
salvus.file(t, raw=False)
os.unlink(t)
else:
t = tmp_filename(ext='.webm')
obj.ffmpeg(t, delay=delay, **kwds)
salvus.file(t, raw=True) # and let delete when worksheet ends - need this so can replay video.
def show_2d_plot_using_matplotlib(obj, svg, **kwds):
if isinstance(obj, matplotlib.image.AxesImage):
# The result of imshow, e.g.,
#
# from matplotlib import numpy, pyplot
# pyplot.imshow(numpy.random.random_integers(255, size=(100,100,3)))
#
t = tmp_filename(ext='.png')
obj.write_png(t)
salvus.file(t)
os.unlink(t)
return
if isinstance(obj, matplotlib.axes.Axes):
obj = obj.get_figure()
if 'events' in kwds:
from graphics import InteractiveGraphics
ig = InteractiveGraphics(obj, **kwds['events'])
n = '__a'+uuid().replace('-','') # so it doesn't get garbage collected instantly.
obj.__setattr__(n, ig)
kwds2 = dict(kwds)
del kwds2['events']
ig.show(**kwds2)
else:
t = tmp_filename(ext = '.svg' if svg else '.png')
if isinstance(obj, matplotlib.figure.Figure):
obj.savefig(t, **kwds)
else:
obj.save(t, **kwds)
salvus.file(t)
os.unlink(t)
def show_3d_plot_using_tachyon(obj, **kwds):
t = tmp_filename(ext = '.png')
obj.save(t, **kwds)
salvus.file(t)
os.unlink(t)
def show_graph_using_d3(obj, **kwds):
salvus.d3_graph(obj, **kwds)
def plot3d_using_matplotlib(expr, rangeX, rangeY,
density=40, elev=45., azim=35.,
alpha=0.85, cmap=None):
"""
Plots a symbolic expression in two variables on a two dimensional grid
and renders the function using matplotlib's 3D projection.
The purpose is to make it possible to create vectorized images (PDF, SVG)
for high-resolution images in publications -- instead of rasterized image formats.
Example::
%var x y
plot3d_using_matplotlib(x^2 + (1-y^2), (x, -5, 5), (y, -5, 5))
Arguments::
* expr: symbolic expression, e.g. x^2 - (1-y)^2
* rangeX: triple: (variable, minimum, maximum), e.g. (x, -10, 10)
* rangeY: like rangeX
* density: grid density
* elev: elevation, e.g. 45
* azim: azimuth, e.g. 35
* alpha: alpha transparency of plot (default: 0.85)
* cmap: matplotlib colormap, e.g. matplotlib.cm.Blues (default)
"""
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import numpy as np
cmap = cmap or cm.Blues
plt.cla()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=elev, azim=azim)
xx = np.linspace(rangeX[1], rangeX[2], density)
yy = np.linspace(rangeY[1], rangeY[2], density)
X, Y = np.meshgrid(xx, yy)
exprv = np.vectorize(lambda x1, x2 : \
float(expr.subs({rangeX[0] : x1, rangeY[0] : x2})))
Z = exprv(X, Y)
zlim = np.min(Z), np.max(Z)
ax.plot_surface(X, Y, Z, alpha=alpha, cmap=cmap, linewidth=.5,
shade=True,
rstride=int(len(xx)/10),
cstride=int(len(yy)/10))
ax.set_xlabel('X')
ax.set_xlim(*rangeX[1:])
ax.set_ylabel('Y')
ax.set_ylim(*rangeY[1:])
ax.set_zlabel('Z')
ax.set_zlim(*zlim)
plt.show()
from sage.plot.graphics import Graphics, GraphicsArray
from sage.plot.plot3d.base import Graphics3d
import cgi
def show(*objs, **kwds):
"""
Show a 2d or 3d graphics object (or objects), animation, or matplotlib figure, or show an
expression typeset nicely using LaTeX.
- display: (default: True); if True, use display math for expression (big and centered).
- svg: (default: True); if True, show 2d plots using svg (otherwise use png)
- d3: (default: True); if True, show graphs (vertices and edges) using an interactive D3 viewer
for the many options for this viewer, type 'import graphics; graphics.graph_to_d3_jsonable?'
If false, graphs are converted to plots and displayed as usual.
- renderer: (default: 'webgl'); for 3d graphics
- 'webgl' (fastest) using hardware accelerated 3d;
- 'canvas' (slower) using a 2d canvas, but may work better with transparency;
- 'tachyon' -- a ray traced static image.
- spin: (default: False); spins 3d plot, with number determining speed (requires mouse over plot)
- events: if given, {'click':foo, 'mousemove':bar}; each time the user clicks,
the function foo is called with a 2-tuple (x,y) where they clicked. Similarly
for mousemove. This works for Sage 2d graphics and matplotlib figures.
ANIMATIONS:
- animations are by default encoded and displayed using an efficiently web-friendly
format (currently webm, which is **not supported** by Safari or IE).
- ``delay`` - integer (default: 20); delay in hundredths of a
second between frames.
- gif=False -- if you set gif=True, instead use an animated gif,
which is much less efficient, but works on all browsers.
You can also use options directly to the animate command, e.g., the figsize option below:
a = animate([plot(sin(x + a), (x, 0, 2*pi)) for a in [0, pi/4, .., 2*pi]], figsize=6)
show(a, delay=30)
EXAMPLES:
Some examples:
show(2/3)
show([1, 4/5, pi^2 + e], 1+pi)
show(x^2, display=False)
show(e, plot(sin))
Here's an example that illustrates creating a clickable image with events::
@interact
def f0(fun=x*sin(x^2), mousemove='', click='(0,0)'):
click = sage_eval(click)
g = plot(fun, (x,0,5), zorder=0) + point(click, color='red', pointsize=100, zorder=10)
ymax = g.ymax(); ymin = g.ymin()
m = fun.derivative(x)(x=click[0])
b = fun(x=click[0]) - m*click[0]
g += plot(m*x + b, (click[0]-1,click[0]+1), color='red', zorder=10)
def h(p):
f0.mousemove = p
def c(p):
f0(click=p)
show(g, events={'click':c, 'mousemove':h}, svg=True, gridlines='major', ymin=ymin, ymax=ymax)
"""
# svg=True, d3=True,
svg = kwds.get('svg',True)
d3 = kwds.get('d3',True)
display = kwds.get('display', True)
for t in ['svg', 'd3', 'display']:
if t in kwds:
del kwds[t]
import graphics
def show0(obj, combine_all=False):
# Either show the object and return None or
# return a string of html to represent obj.
if isinstance(obj, (Graphics, GraphicsArray, matplotlib.figure.Figure, matplotlib.axes.Axes, matplotlib.image.AxesImage)):
show_2d_plot_using_matplotlib(obj, svg=svg, **kwds)
elif isinstance(obj, Animation):
show_animation(obj, **kwds)
elif isinstance(obj, Graphics3d):
if kwds.get('viewer') == 'tachyon':
show_3d_plot_using_tachyon(obj, **kwds)
else:
salvus.threed(obj, **kwds)
# graphics.show_3d_plot_using_threejs(obj, **kwds)
elif isinstance(obj, (sage.graphs.graph.Graph, sage.graphs.digraph.DiGraph)):
if d3:
show_graph_using_d3(obj, **kwds)
else:
show(obj.plot(), **kwds)
elif isinstance(obj, str):
return obj
elif isinstance(obj, (list, tuple)):
v = []
for a in obj:
b = show0(a)
if b is not None:
v.append(b)
if combine_all:
return ' '.join(v)
s = ', '.join(v)
if isinstance(obj, list):
return '[%s]'%s
else:
return '(%s)'%s
else:
if display:
return "$\\displaystyle %s$"%sage.misc.latex.latex(obj)
else:
return "$%s$"%sage.misc.latex.latex(obj)
s = show0(objs, combine_all=True)
if s is not None:
if display:
salvus.html("<div align='center'>%s</div>"%cgi.escape(s))
else:
salvus.html("<div>%s</div>"%cgi.escape(s))
# Make it so plots plot themselves correctly when they call their repr.
Graphics.show = show
GraphicsArray.show = show
Animation.show = show
# Very "evil" abuse of the display manager, so sphere().show() works:
try:
from sage.repl.rich_output import get_display_manager
get_display_manager().display_immediately = show
except:
# so doesn't crash on older versions of Sage.
pass
###################################################
# %auto -- automatically evaluate a cell on load
###################################################
def auto(s):
"""
The %auto decorator sets a cell so that it will be automatically
executed when the Sage process first starts.
Thus %auto allows you to initialize functions, variables, interacts,
etc., e.g., when loading a worksheet.
NOTE: The %auto decorator just calls salvus.auto(True), which sets
a cell metatag. You *must* execute the cell containing %auto at
least once in order for it to work.
"""
salvus.auto(True)
return s # the do-nothing block decorator.
def hide(component='input'):
"""
    Hide a component of a cell. By default, hide hides the code
editor part of the cell, but you can hide other parts by passing
in an optional argument:
'input', 'output'
Use the cell.show(...) function to reveal a cell component.
"""
if component not in ['input', 'output']:
        # Allow %hide to work, for compatibility with sagenb.
hide('input')
return component
cell.hide(component)
def hideall(code=None):
cell.hideall()
if code is not None: # for backwards compat with sagenb
return code
##########################################################
# A "%exercise" cell mode -- a first step toward
# automated homework.
##########################################################
class Exercise:
def __init__(self, question, answer, check=None, hints=None):
import sage.all
from sage.structure.element import is_Matrix
if not (isinstance(answer, (tuple, list)) and len(answer) == 2):
if is_Matrix(answer):
default = sage.all.parent(answer)(0)
else:
default = ''
answer = [answer, default]
if check is None:
R = sage.all.parent(answer[0])
def check(attempt):
return R(attempt) == answer[0]
if hints is None:
hints = ['','','',"The answer is %s."%answer[0]]
self._question = question
self._answer = answer
self._check = check
self._hints = hints
def _check_attempt(self, attempt, interact):
from sage.misc.all import walltime
response = "<div class='well'>"
try:
r = self._check(attempt)
if isinstance(r, tuple) and len(r)==2:
correct = r[0]
comment = r[1]
else:
correct = bool(r)
comment = ''
except TypeError, msg:
response += "<h3 style='color:darkgreen'>Huh? -- %s (attempt=%s)</h3>"%(msg, attempt)
else:
if correct:
response += "<h1 style='color:blue'>RIGHT!</h1>"
if self._start_time:
response += "<h2 class='lighten'>Time: %.1f seconds</h2>"%(walltime()-self._start_time,)
if self._number_of_attempts == 1:
response += "<h3 class='lighten'>You got it first try!</h3>"
else:
response += "<h3 class='lighten'>It took you %s attempts.</h3>"%(self._number_of_attempts,)
else:
response += "<h3 style='color:darkgreen'>Not correct yet...</h3>"
if self._number_of_attempts == 1:
response += "<h4 style='lighten'>(first attempt)</h4>"
else:
response += "<h4 style='lighten'>(%s attempts)</h4>"%self._number_of_attempts
if self._number_of_attempts > len(self._hints):
hint = self._hints[-1]
else:
hint = self._hints[self._number_of_attempts-1]
if hint:
response += "<span class='lighten'>(HINT: %s)</span>"%(hint,)
if comment:
response += '<h4>%s</h4>'%comment
response += "</div>"
interact.feedback = text_control(response,label='')
return correct
def ask(self, cb):
from sage.misc.all import walltime
self._start_time = walltime()
self._number_of_attempts = 0
attempts = []
@interact(layout=[[('question',12)],[('attempt',12)], [('feedback',12)]])
def f(question = ("<b>Question:</b>", text_control(self._question)),
attempt = ('<b>Answer:</b>',self._answer[1])):
if 'attempt' in interact.changed() and attempt != '':
attempts.append(attempt)
if self._start_time == 0:
self._start_time = walltime()
self._number_of_attempts += 1
if self._check_attempt(attempt, interact):
cb({'attempts':attempts, 'time':walltime()-self._start_time})
def exercise(code):
r"""
Use the %exercise cell decorator to create interactive exercise
sets. Put %exercise at the top of the cell, then write Sage code
in the cell that defines the following (all are optional):
- a ``question`` variable, as an HTML string with math in dollar
signs
- an ``answer`` variable, which can be any object, or a pair
(correct_value, interact control) -- see the docstring for
interact for controls.
- an optional callable ``check(answer)`` that returns a boolean or
a 2-tuple
(True or False, message),
where the first argument is True if the answer is correct, and
the optional second argument is a message that should be
displayed in response to the given answer. NOTE: Often the
input "answer" will be a string, so you may have to use Integer,
RealNumber, or sage_eval to evaluate it, depending
on what you want to allow the user to do.
- hints -- optional list of strings to display in sequence each
time the user enters a wrong answer. The last string is
displayed repeatedly. If hints is omitted, the correct answer
is displayed after three attempts.
NOTE: The code that defines the exercise is executed so that it
does not impact (and is not impacted by) the global scope of your
variables elsewhere in your session. Thus you can have many
%exercise cells in a single worksheet with no interference between
them.
The following examples further illustrate how %exercise works.
An exercise to test your ability to sum the first $n$ integers::
%exercise
title = "Sum the first n integers, like Gauss did."
n = randint(3, 100)
question = "What is the sum $1 + 2 + \\cdots + %s$ of the first %s positive integers?"%(n,n)
answer = n*(n+1)//2
Transpose a matrix::
%exercise
title = r"Transpose a $2 \times 2$ Matrix"
A = random_matrix(ZZ,2)
question = "What is the transpose of $%s?$"%latex(A)
answer = A.transpose()
Add together a few numbers::
%exercise
k = randint(2,5)
title = "Add %s numbers"%k
v = [randint(1,10) for _ in range(k)]
question = "What is the sum $%s$?"%(' + '.join([str(x) for x in v]))
answer = sum(v)
The trace of a matrix::
%exercise
title = "Compute the trace of a matrix."
A = random_matrix(ZZ, 3, x=-5, y = 5)^2
question = "What is the trace of $$%s?$$"%latex(A)
answer = A.trace()
Some basic arithmetic with hints and dynamic feedback::
%exercise
k = randint(2,5)
title = "Add %s numbers"%k
v = [randint(1,10) for _ in range(k)]
question = "What is the sum $%s$?"%(' + '.join([str(x) for x in v]))
answer = sum(v)
hints = ['This is basic arithmetic.', 'The sum is near %s.'%(answer+randint(1,5)), "The answer is %s."%answer]
def check(attempt):
c = Integer(attempt) - answer
if c == 0:
return True
if abs(c) >= 10:
return False, "Gees -- not even close!"
if c < 0:
return False, "too low"
if c > 0:
return False, "too high"
"""
f = closure(code)
def g():
x = f()
return x.get('title',''), x.get('question', ''), x.get('answer',''), x.get('check',None), x.get('hints',None)
title, question, answer, check, hints = g()
obj = {}
obj['E'] = Exercise(question, answer, check, hints)
obj['title'] = title
def title_control(t):
return text_control('<h3 class="lighten">%s</h3>'%t)
the_times = []
@interact(layout=[[('go',1), ('title',11,'')],[('')], [('times',12, "<b>Times:</b>")]], flicker=True)
def h(go = button(" "*5 + "Go" + " "*7, label='', icon='fa-refresh', classes="btn-large btn-success"),
title = title_control(title),
times = text_control('')):
c = interact.changed()
if 'go' in c or 'another' in c:
interact.title = title_control(obj['title'])
def cb(obj):
the_times.append("%.1f"%obj['time'])
h.times = ', '.join(the_times)
obj['E'].ask(cb)
title, question, answer, check, hints = g() # get ready for next time.
obj['title'] = title
obj['E'] = Exercise(question, answer, check, hints)
def closure(code):
"""
Wrap the given code block (a string) in a closure, i.e., a
function with an obfuscated random name.
When called, the function returns locals().
"""
import uuid
# TODO: strip string literals first
code = ' ' + ('\n '.join(code.splitlines()))
fname = "__" + str(uuid.uuid4()).replace('-','_')
closure = "def %s():\n%s\n return locals()"%(fname, code)
class Closure:
def __call__(self):
return self._f()
c = Closure()
salvus.execute(closure)
c._f = salvus.namespace[fname]
del salvus.namespace[fname]
return c
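# A minimal, self-contained sketch of the same wrapping idea, without salvus:
# the code string is indented into a generated function body, executed in a
# scratch namespace, and the generated function's locals() are returned.
# Purely illustrative; the real closure() above goes through salvus.execute.
def _closure_demo():
    code = "a = 1\nb = a + 1"
    body = ' ' + ('\n '.join(code.splitlines()))
    src = "def _demo_fn():\n%s\n return locals()" % body
    ns = {}
    exec src in ns
    return ns['_demo_fn']()   # -> {'a': 1, 'b': 2}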
#########################################
# Dynamic variables (linked to controls)
#########################################
def _dynamic(var, control=None):
if control is None:
control = salvus.namespace.get(var,'')
@interact(layout=[[(var,12)]], output=False)
def f(x=(var,control)):
salvus.namespace.set(var, x, do_not_trigger=[var])
def g(y):
f.x = y
salvus.namespace.on('change', var, g)
if var in salvus.namespace:
x = salvus.namespace[var]
def dynamic(*args, **kwds):
"""
Make variables in the global namespace dynamically linked to a control from the
interact label (see the documentation for interact).
EXAMPLES:
Make a control linked to a variable that doesn't yet exist::
dynamic('xyz')
Make a slider and a selector, linked to t and x::
dynamic(t=(1..10), x=[1,2,3,4])
t = 5 # this changes the control
"""
for var in args:
if not isinstance(var, str):
i = id(var)
for k,v in salvus.namespace.iteritems():
if id(v) == i:
_dynamic(k)
return
else:
_dynamic(var)
for var, control in kwds.iteritems():
_dynamic(var, control)
import sage.all
def var0(*args, **kwds):
if len(args)==1:
name = args[0]
else:
name = args
G = salvus.namespace
v = sage.all.SR.var(name, **kwds)
if isinstance(v, tuple):
for x in v:
G[repr(x)] = x
else:
G[repr(v)] = v
return v
def var(*args, **kwds):
"""
Create symbolic variables and inject them into the global namespace.
NOTE: In SageMathCloud, you can use var as a line decorator::
%var x
%var a,b,theta # separate with commas
%var x y z t # separate with spaces
    Use latex_name to customize how the variable is typeset:
var1 = var('var1', latex_name=r'\sigma^2_1')
show(e^(var1**2))
Multicolored variables made using the %var line decorator:
%var(latex_name=r"\color{green}{\theta}") theta
%var(latex_name=r"\color{red}{S_{u,i}}") sui
show(expand((sui + x^3 + theta)^2))
Here is the docstring for var in Sage:
"""
if 'latex_name' in kwds:
# wrap with braces -- sage should probably do this, but whatever.
kwds['latex_name'] = '{%s}'%kwds['latex_name']
if len(args) > 0:
return var0(*args, **kwds)
else:
def f(s):
return var0(s, *args, **kwds)
return f
var.__doc__ += sage.all.var.__doc__
#############################################
# Variable reset -- we have to rewrite
# this because of all the monkey patching
# that we do.
#############################################
import sage.misc.reset
def reset(vars=None, attached=False):
"""
If vars is specified, just restore the value of vars and leave
all other variables alone. In SageMathCloud, you can also use
reset as a line decorator::
%reset x, pi, sin # comma-separated
%reset x pi sin # commas are optional
If vars is not given, delete all user-defined variables, reset
all global variables back to their default states, and reset
all interfaces to other computer algebra systems.
Original reset docstring::
"""
if vars is not None:
restore(vars)
return
G = salvus.namespace
T = type(sys) # module type
for k in G.keys():
        if k[0] != '_' and type(G[k]) != T:  # skip private names and modules
try:
del G[k]
except KeyError:
pass
restore()
from sage.symbolic.assumptions import forget; forget()
sage.misc.reset.reset_interfaces()
if attached:
sage.misc.reset.reset_attached()
reset.__doc__ += sage.misc.reset.reset.__doc__
def restore(vars=None):
""
if isinstance(vars, unicode):
vars = str(vars) # sage.misc.reset is unicode ignorant
if ',' in vars: # sage.misc.reset is stupid about commas and space -- TODO: make a patch to sage
vars = [v.strip() for v in vars.split(',')]
import sage.calculus.calculus
sage.misc.reset._restore(salvus.namespace, default_namespace, vars)
sage.misc.reset._restore(sage.calculus.calculus.syms_cur, sage.calculus.calculus.syms_default, vars)
restore.__doc__ += sage.misc.reset.restore.__doc__
# NOTE: this is not used anymore
def md2html(s):
from markdown2Mathjax import sanitizeInput, reconstructMath
from markdown2 import markdown
delims = [('\\(','\\)'), ('$$','$$'), ('\\[','\\]'),
('\\begin{equation}', '\\end{equation}'), ('\\begin{equation*}', '\\end{equation*}'),
('\\begin{align}', '\\end{align}'), ('\\begin{align*}', '\\end{align*}'),
('\\begin{eqnarray}', '\\end{eqnarray}'), ('\\begin{eqnarray*}', '\\end{eqnarray*}'),
('\\begin{math}', '\\end{math}'),
('\\begin{displaymath}', '\\end{displaymath}')
]
tmp = [((s,None),None)]
for d in delims:
tmp.append((sanitizeInput(tmp[-1][0][0], equation_delims=d), d))
extras = ['code-friendly', 'footnotes', 'smarty-pants', 'wiki-tables']
markedDownText = markdown(tmp[-1][0][0], extras=extras)
while len(tmp) > 1:
markedDownText = reconstructMath(markedDownText, tmp[-1][0][1], equation_delims=tmp[-1][1])
del tmp[-1]
return markedDownText
# NOTE: this is not used anymore
class Markdown(object):
r"""
Cell mode that renders everything after %md as markdown and hides the input by default.
EXAMPLES::
---
%md
# A Title
## A subheading
---
%md(hide=False)
# A title
- a list
---
md("# A title", hide=False)
---
%md(hide=False) `some code`
This uses the Python markdown2 library with the following
extras enabled:
'code-friendly', 'footnotes',
'smarty-pants', 'wiki-tables'
See https://github.com/trentm/python-markdown2/wiki/Extras
We also use markdown2Mathjax so that LaTeX will be properly
typeset if it is wrapped in $'s and $$'s, \(, \), \[, \],
\begin{equation}, \end{equation}, \begin{align}, \end{align}.,
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return Markdown(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
html(md2html(s),hide=hide)
# not used
#md = Markdown()
# Instead... of the above server-side markdown, we use this client-side markdown.
class Marked(object):
r"""
Cell mode that renders everything after %md as Github flavored
markdown [1] with mathjax and hides the input by default.
[1] https://help.github.com/articles/github-flavored-markdown
The rendering is done client-side using marked and mathjax.
EXAMPLES::
---
%md
# A Title
## A subheading
---
%md(hide=False)
# A title
- a list
---
md("# A title", hide=False)
---
%md(hide=False) `some code`
"""
def __init__(self, hide=True):
self._hide = hide
def __call__(self, *args, **kwds):
if len(kwds) > 0 and len(args) == 0:
return Marked(**kwds)
if len(args) > 0:
self._render(args[0], **kwds)
def _render(self, s, hide=None):
if hide is None:
hide = self._hide
if hide:
salvus.hide('input')
salvus.md(s)
md = Marked()
#####
## Raw Input
def raw_input(prompt='', default='', placeholder='', input_width=None, label_width=None, type=None):
"""
Read a string from the user in the worksheet interface to Sage.
INPUTS:
- prompt -- (default: '') a label to the left of the input
- default -- (default: '') default value to put in input box
- placeholder -- (default: '') default placeholder to put in grey when input box empty
- input_width -- (default: None) css that gives the width of the input box
- label_width -- (default: None) css that gives the width of the label
- type -- (default: None) if not given, returns a unicode string representing the exact user input.
Other options include:
- type='sage' -- will evaluate it to a sage expression in the global scope.
- type=anything that can be called, e.g., type=int, type=float.
OUTPUT:
- By default, returns a **unicode** string (not a normal Python str). However, can be customized
by changing the type.
EXAMPLE:
print salvus.raw_input("What is your full name?", default="Sage Math", input_width="20ex", label_width="15ex")
"""
return salvus.raw_input(prompt=prompt, default=default, placeholder=placeholder, input_width=input_width, label_width=label_width, type=type)
#####
## Clear
def clear():
"""
Clear the output of the current cell. You can use this to
dynamically animate the output of a cell using a for loop.
SEE ALSO: delete_last_output
"""
salvus.clear()
def delete_last_output():
"""
Delete the last output message.
SEE ALSO: clear
"""
salvus.delete_last_output()
#####
# Generic Pandoc cell decorator
def pandoc(fmt, doc=None, hide=True):
"""
INPUT:
- fmt -- one of 'docbook', 'haddock', 'html', 'json', 'latex', 'markdown', 'markdown_github',
'markdown_mmd', 'markdown_phpextra', 'markdown_strict', 'mediawiki',
'native', 'opml', 'rst', 'textile'
- doc -- a string in the given format
OUTPUT:
- Called directly, you get the HTML rendered version of doc as a string.
- If you use this as a cell decorator, it displays the HTML output, e.g.,
%pandoc('mediawiki')
* ''Unordered lists'' are easy to do:
** Start every line with a star.
*** More stars indicate a deeper level.
"""
if doc is None:
return lambda x : html(pandoc(fmt, x), hide=hide) if x is not None else ''
import subprocess
p = subprocess.Popen(['pandoc', '-f', fmt, '--mathjax'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
if not isinstance(doc, unicode):
doc = unicode(doc, 'utf8')
p.stdin.write(doc.encode('UTF-8'))
p.stdin.close()
err = p.stderr.read()
if err:
raise RuntimeError(err)
return p.stdout.read()
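# Hypothetical direct-call sketch (requires the pandoc binary on the PATH and
# is not executed on import): the function-call form returns the rendered
# HTML as a string, while the cell-decorator form shown above displays it.
def _pandoc_demo():
    return pandoc('markdown', '# A title\n\nSome *text* and math $x^2$.')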
def wiki(doc=None, hide=True):
"""
Mediawiki markup cell decorator. E.g.,
EXAMPLE::
%wiki(hide=False)
* ''Unordered lists'' and math like $x^3 - y^2$ are both easy
** Start every line with a star.
*** More stars indicate a deeper level. """
if doc is None:
return lambda doc: wiki(doc=doc, hide=hide) if doc else ''
html(pandoc('mediawiki', doc=doc), hide=hide)
mediawiki = wiki
######
def load_html_resource(filename):
fl = filename.lower()
if fl.startswith('http://') or fl.startswith('https://'):
# remote url
        url = filename  # keep the URL's original case; 'fl' is only used for the scheme check
else:
# local file
url = salvus.file(filename, show=False)
ext = os.path.splitext(filename)[1][1:].lower()
if ext == "css":
salvus.javascript('''$.get("%s", function(css) { $('<style type=text/css></style>').html(css).appendTo("body")});'''%url)
elif ext == "html":
# TODO: opts.element should change to cell.element when more canonical (need to finish some code in syncdoc)!
salvus.javascript('opts.element.append($("<div>").load("%s"))'%url)
elif ext == "coffee":
salvus.javascript('$.ajax({url:"%s"}).done(function(data) { eval(CoffeeScript.compile(data)); })'%url)
elif ext == "js":
salvus.html('<script src="%s"></script>'%url)
# Monkey-patched the load command
def load(*args, **kwds):
"""
Load Sage object from the file with name filename, which will have
an .sobj extension added if it doesn't have one. Or, if the input
is a filename ending in .py, .pyx, or .sage, load that file into
the current running session. Loaded files are not loaded into
their own namespace, i.e., this is much more like Python's
"execfile" than Python's "import".
You may also load an sobj or execute a code file available on the web
by specifying the full URL to the file. (Set ``verbose = False`` to
    suppress the download progress indicator.)
INPUT:
- args -- any number of filename strings with any of the following extensions:
.sobj, .sage, .py, .pyx, .html, .css, .js, .coffee, .pdf
    - ``verbose`` -- (default: True); if True, show a progress indicator when downloading files over the network.
If you load any of the web types (.html, .css, .js, .coffee), they are loaded
into the web browser DOM (or Javascript session), not the Python process.
If you load a pdf, it is displayed in the output of the worksheet. The extra
options are passed to salvus.pdf -- see the docstring for that.
In SageMathCloud you may also use load as a decorator, with filenames separated
by whitespace or commas::
%load foo.sage bar.py a.pyx, b.pyx
The following are all valid ways to use load::
%load a.html
%load a.css
%load a.js
%load a.coffee
%load a.css a.js a.coffee a.html
load('a.css', 'a.js', 'a.coffee', 'a.html')
load('a.css a.js a.coffee a.html')
load(['a.css', 'a.js', 'a.coffee', 'a.html'])
ALIAS: %runfile is the same as %load, for compatibility with IPython.
"""
if len(args) == 1:
if isinstance(args[0], (unicode,str)):
args = tuple(args[0].replace(',',' ').split())
if isinstance(args[0], (list, tuple)):
args = args[0]
if len(args) == 0 and len(kwds) == 1:
# This supports
# %load(verbose=False) a.sage
# which doesn't really matter right now, since there is a bug in Sage's own
# load command, where it isn't verbose for network code, but is for objects.
def f(*args):
return load(*args, **kwds)
return f
t = '__tmp__'; i=0
while t+str(i) in salvus.namespace:
i += 1
t += str(i)
# First handle HTML related args -- these are all very oriented toward cloud.sagemath worksheets
html_extensions = set(['js','css','coffee','html'])
other_args = []
for arg in args:
i = arg.rfind('.')
if i != -1 and arg[i+1:].lower() in html_extensions:
load_html_resource(arg)
elif i != -1 and arg[i+1:].lower() == 'pdf':
show_pdf(arg, **kwds)
else:
other_args.append(arg)
# now handle remaining non-web arguments.
if len(other_args) > 0:
try:
exec 'salvus.namespace["%s"] = sage.structure.sage_object.load(*__args, **__kwds)'%t in salvus.namespace, {'__args':other_args, '__kwds':kwds}
return salvus.namespace[t]
finally:
try:
del salvus.namespace[t]
except: pass
# add alias, due to IPython.
runfile = load
## Make it so pylab (matplotlib) figures display, at least using pylab.show
import pylab
def _show_pylab(svg=True):
"""
Show a Pylab plot in a Sage Worksheet.
INPUTS:
- svg -- boolean (default: True); if True use an svg; otherwise, use a png.
"""
try:
ext = '.svg' if svg else '.png'
filename = uuid() + ext
pylab.savefig(filename)
salvus.file(filename)
finally:
try:
os.unlink(filename)
except:
pass
pylab.show = _show_pylab
matplotlib.figure.Figure.show = show
import matplotlib.pyplot
def _show_pyplot(svg=True):
"""
    Show a matplotlib.pyplot figure in a Sage Worksheet.
INPUTS:
- svg -- boolean (default: True); if True use an svg; otherwise, use a png.
"""
try:
ext = '.svg' if svg else '.png'
filename = uuid() + ext
matplotlib.pyplot.savefig(filename)
salvus.file(filename)
finally:
try:
os.unlink(filename)
except:
pass
matplotlib.pyplot.show = _show_pyplot
## Our own displayhook
_system_sys_displayhook = sys.displayhook
def displayhook(obj):
if isinstance(obj, (Graphics3d, Graphics, GraphicsArray, matplotlib.figure.Figure, matplotlib.axes.Axes, matplotlib.image.AxesImage, Animation)):
show(obj)
else:
_system_sys_displayhook(obj)
sys.displayhook = displayhook
import sage.misc.latex, types
# We make this a list so that users can append to it easily.
TYPESET_MODE_EXCLUDES = [sage.misc.latex.LatexExpr, types.NoneType,
type, sage.plot.plot3d.base.Graphics3d,
sage.plot.graphics.Graphics,
sage.plot.graphics.GraphicsArray]
def typeset_mode(on=True, display=True, **args):
"""
Turn typeset mode on or off. When on, each output is typeset using LaTeX.
EXAMPLES::
typeset_mode() # turns typesetting on
typeset_mode(False) # turn typesetting off
typeset_mode(True, display=False) # typesetting mode on, but do not make output big and centered
"""
if on:
def f(obj):
if isinstance(obj, tuple(TYPESET_MODE_EXCLUDES)):
displayhook(obj)
else:
salvus.tex(obj, display=display)
sys.displayhook = f
else:
sys.displayhook = displayhook
def default_mode(mode):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use default_mode() to return the current mode.
Use default_mode("") to have no default mode.
EXAMPLES::
Make Pari/GP the default mode:
default_mode("gp")
default_mode() # outputs "gp"
Then switch back to Sage::
default_mode("") # or default_mode("sage")
You can also use default_mode as a line decorator::
%default_mode gp # equivalent to default_mode("gp")
"""
return salvus.default_mode(mode)
#######################################################
# Monkey patching and deprecation --
#######################################################
# Monkey patch around a bug in Python's findsource that breaks deprecation in cloud worksheets.
# This won't matter if we switch to not using exec, since then there will be a file behind
# each block of code. However, for now we have to do this.
import inspect
_findsource = inspect.findsource
def findsource(object):
try: return _findsource(object)
except: raise IOError('source code not available') # as *claimed* by the Python docs!
inspect.findsource = findsource
#######################################################
# Viewing pdf's
#######################################################
def show_pdf(filename, viewer="object", width=1000, height=600, scale=1.6):
"""
Display a PDF file from the filesystem in an output cell of a worksheet.
INPUT:
- filename
- viewer -- 'object' (default): use html object tag, which uses the browser plugin, or
provides a download link in case the browser can't display pdf's.
-- 'pdfjs' (experimental): use the pdf.js pure HTML5 viewer, which doesn't require any plugins
        (this works in more browsers, but may be slower and uglier)
- width -- (default: 1000) -- pixel width of viewer
- height -- (default: 600) -- pixel height of viewer
- scale -- (default: 1.6) -- zoom scale (only applies to pdfjs)
"""
url = salvus.file(filename, show=False)
if viewer == 'object':
s = '<object data="%s" type="application/pdf" width="%s" height="%s"> Your browser doesn\'t support embedded PDF\'s, but you can <a href="%s">download %s</a></p> </object>'%(url, width, height, url, filename)
salvus.html(s)
elif viewer == 'pdfjs':
import uuid
id = 'a'+str(uuid())
salvus.html('<div id="%s" style="background-color:white; width:%spx; height:%spx; cursor:pointer; overflow:auto;"></div>'%(id, width, height))
salvus.html("""
<!-- pdf.js-based embedded javascript PDF viewer -->
<!-- File from the PDF.JS Library -->
<script type="text/javascript" src="pdfListView/external/compatibility.js"></script>
<script type="text/javascript" src="pdfListView/external/pdf.js"></script>
<!-- to disable webworkers: swap these below -->
<!-- <script type="text/javascript">PDFJS.disableWorker = true;</script> -->
<script type="text/javascript">PDFJS.workerSrc = 'pdfListView/external/pdf.js';</script>
<link rel="stylesheet" href="pdfListView/src/TextLayer.css">
<script src="pdfListView/src/TextLayerBuilder.js"></script>
<link rel="stylesheet" href="pdfListView/src/AnnotationsLayer.css">
<script src="pdfListView/src/AnnotationsLayerBuilder.js"></script>
<script src="pdfListView/src/PdfListView.js"></script>
""")
salvus.javascript('''
var lv = new PDFListView($("#%s")[0], {textLayerBuilder:TextLayerBuilder, annotationsLayerBuilder: AnnotationsLayerBuilder});
lv.setScale(%s);
lv.loadPdf("%s")'''%(
id, scale, url))
else:
raise RuntimeError("viewer must be 'object' or 'pdfjs'")
########################################################
# WebRTC Support
########################################################
def sage_chat(chatroom=None, height="258px"):
if chatroom is None:
from random import randint
chatroom = randint(0,1e24)
html("""
<iframe src="/static/webrtc/group_chat_cell.html?%s" height="%s" width="100%%"></iframe>
"""%(chatroom, height), hide=False)
########################################################
# Documentation of magics
########################################################
def magics(dummy=None):
"""
Type %magics to print all SageMathCloud magic commands or
magics() to get a list of them.
To use a magic command, either type
%command <a line of code>
or
%command
[rest of cell]
Create your own magic command by defining a function that takes
a string as input and outputs a string. (Yes, it is that simple.)
"""
import re
magic_cmds = set()
for s in open(os.path.realpath(__file__), 'r').xreadlines():
s = s.strip()
if s.startswith('%'):
magic_cmds.add(re.findall(r'%[a-zA-Z]+', s)[0])
magic_cmds.discard('%s')
for k,v in sage.interfaces.all.__dict__.iteritems():
if isinstance(v, sage.interfaces.expect.Expect):
magic_cmds.add('%'+k)
magic_cmds.update(['%cython', '%time', '%magics', '%auto', '%hide', '%hideall',
'%fork', '%runfile', '%default_mode', '%typeset_mode'])
v = list(sorted(magic_cmds))
if dummy is None:
return v
else:
for s in v:
print(s)
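# A minimal sketch of a user-defined magic command, as described above: any
# function that takes the cell body (a string) and returns a string can be
# used with the %-syntax.  The name "strip_comments" is purely illustrative
# and is not part of SageMathCloud itself.
def strip_comments(s):
    # Drop full-line Python comments from the cell before it is evaluated:
    #     %strip_comments
    #     x = 1
    #     # this line would be removed
    return '\n'.join(line for line in s.splitlines()
                     if not line.lstrip().startswith('#'))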
########################################################
# Go magic
########################################################
def go(s):
"""
Run a go program. For example,
%go
func main() { fmt.Println("Hello World") }
You can set the whole worksheet to be in go mode by typing
%default_mode go
NOTES:
- The official Go tutorial as a long Sage Worksheet is available here:
https://github.com/sagemath/cloud-examples/tree/master/go
- There is no relation between one cell and the next. Each is a separate
self-contained go program, which gets compiled and run, with the only
side effects being changes to the filesystem. The program itself is
stored in a random file that is deleted after it is run.
- The %go command automatically adds 'package main' and 'import "fmt"'
(if fmt. is used) to the top of the program, since the assumption
is that you're using %go interactively.
"""
import uuid
name = str(uuid.uuid4())
if 'fmt.' in s and '"fmt"' not in s and "'fmt'" not in s:
s = 'import "fmt"\n' + s
if 'package main' not in s:
s = 'package main\n' + s
try:
open(name +'.go','w').write(s.encode("UTF-8"))
(child_stdin, child_stdout, child_stderr) = os.popen3('go build %s.go'%name)
err = child_stderr.read()
sys.stdout.write(child_stdout.read())
sys.stderr.write(err)
sys.stdout.flush()
sys.stderr.flush()
if not os.path.exists(name): # failed to produce executable
return
(child_stdin, child_stdout, child_stderr) = os.popen3("./" + name)
sys.stdout.write(child_stdout.read())
sys.stderr.write(child_stderr.read())
sys.stdout.flush()
sys.stderr.flush()
finally:
try:
os.unlink(name+'.go')
except:
pass
try:
os.unlink(name)
except:
pass
# Julia pexepect interface support
import julia
import sage.interfaces
sage.interfaces.julia = julia # the module
julia = julia.julia # specific instance
sage.interfaces.all.julia = julia
# Help command
import sage.misc.sagedoc
import sage.version
def help(*args, **kwds):
if len(args) > 0 or len(kwds) > 0:
sage.misc.sagedoc.help(*args, **kwds)
else:
s = """
## Welcome to Sage %s!
- **Online documentation:** [View the Sage documentation online](http://www.sagemath.org/doc/).
- **Help:** For help on any object or function, for example `matrix_plot`, enter `matrix_plot?` followed by tab or shift+enter. For help on any module (or object or function), for example, `sage.matrix`, enter `help(sage.matrix)`.
- **Tab completion:** Type `obj` followed by tab to see all completions of obj. To see all methods you may call on `obj`, type `obj.` followed by tab.
- **Source code:** Enter `matrix_plot??` followed by tab or shift+enter to look at the source code of `matrix_plot`.
- **License information:** For license information about Sage and its components, enter `license()`."""%sage.version.version
salvus.md(s)
| gpl-3.0 |
LIKAIMO/MissionPlanner | Lib/site-packages/scipy/optimize/nonlin.py | 53 | 46004 | r"""
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
--------
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print 'Residual', abs(residual(sol)).max()
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError
from numpy import asarray, dot, vdot
if sys.platform != 'cli':
import scipy.sparse.linalg
import scipy.sparse
import scipy.lib.blas as blas
import inspect
else:
print "Warning: scipy.optimize.nonlin package is not supported under IronPython yet."
from linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov',
# Deprecated functions:
'broyden_generalized', 'anderson2', 'broyden3']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array-like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : array-like
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
if condition.check(Fx, x, dx):
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x += dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
raise NoConvergence(_array_like(x, x0))
return _array_like(x, x0)
_set_doc(nonlin_solve)
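# Hypothetical smoke test (not executed on import): the small problem from the
# module docstring, driven through the generic solver with Broyden's good
# method as the Jacobian approximation.
def _nonlin_solve_demo():
    F = lambda x: np.cos(x) + x[::-1] - [1, 2, 3, 4]
    return nonlin_solve(F, [1, 1, 1, 1], jacobian='broyden1', f_tol=1e-14)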
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
        self.norm = maxnorm if norm is None else norm  # honor a user-supplied tol_norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return True
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return self.iteration > self.iter
# NB: condition must succeed for rtol=inf even if norm == 0
return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
        Returns J^H * v
    rsolve : optional
        Returns J^-H * v
    matmat : optional
        Returns J * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
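# Illustrative sketch (not executed on import) of the Jacobian specifications
# accepted above: a dense array, a callable returning the Jacobian at x, or
# one of the registered string names.  The values are arbitrary demo data.
def _asjacobian_demo():
    J_dense = asjacobian(np.array([[2.0, 0.0], [0.0, 3.0]]))
    J_callable = asjacobian(lambda x: np.diag([2.0, 3.0]))
    J_named = asjacobian('broyden1')
    return J_dense, J_callable, J_named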
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# autoscale the initial Jacobian parameter
self.alpha = 0.5*max(norm(x0), 1) / norm(f0)
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [vR]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='qr', econ=True)
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
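# Minimal sketch (not used by the solvers) of the representation above:
# alpha*I + sum_n c_n d_n^H, materialized through __array__ and compared with
# the explicit dense formula.  Values are arbitrary demo data.
def _lowrank_matrix_demo():
    n = 4
    M = LowRankMatrix(2.0, n, float)
    c = np.arange(1., n + 1)
    d = np.ones(n)
    M.append(c, d)
    dense = 2.0*np.identity(n) + c[:, None]*d[None, :]
    assert np.allclose(np.array(M), dense)
    return dense   # M.matvec(v) then agrees with dense.dot(v) for any v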
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
                  - ``to_retain``: number of SVD components to retain when
rank reduction is done. Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed from the 'best' solution in the space spanned
    by the last `M` vectors.  As a result, only an MxM matrix inversion
    and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
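# A minimal usage sketch of the `anderson` wrapper generated at the bottom of
# this module; the residual function F below is an arbitrary assumption made
# only for illustration.
#
# >>> import numpy as np
# >>> def F(x):
# ...     return np.cos(x) - x            # single root near x ~ 0.739
# >>> x = anderson(F, np.zeros(1), M=5, w0=0.01)
# After convergence, x is close to 0.739 and F(x) is within the solver's
# default residual tolerance of zero.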
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
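# The _update above keeps only the diagonal of the classical Broyden
# correction J_+ = J + (df - J dx) dx^T / (dx^T dx), with J = -diag(d).
# A small algebra check with arbitrary (assumed) vectors:
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(1)
# >>> d = rng.rand(3); dx = rng.rand(3); df = rng.rand(3)
# >>> J = -np.diag(d)
# >>> J_full = J + np.outer(df - J.dot(dx), dx) / dx.dot(dx)
# >>> d_new = d - (df + d * dx) * dx / np.linalg.norm(dx) ** 2
# >>> np.allclose(np.diag(J_full), -d_new)
# True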
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by numerical
differentiation:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [KK]_,
and for the LGMRES sparse inverse method, see [BJM]_.
References
----------
.. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
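# A minimal usage sketch of the `newton_krylov` wrapper generated at the
# bottom of this module; the residual function is an arbitrary assumption
# chosen only to illustrate the call.
#
# >>> import numpy as np
# >>> def residual(x):
# ...     return x + 0.1 * x ** 3 - np.array([1.0, 2.0])
# >>> sol = newton_krylov(residual, np.zeros(2), method='lgmres', f_tol=1e-8)
# sol now satisfies the residual to roughly the requested f_tol, with each
# Jacobian-vector product approximated by finite differences as in matvec().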
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__``, and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = zip(args[-len(defaults):], defaults)
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec wrapper in ns
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
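# A small sketch of what the generated wrappers look like from the outside:
# the keyword arguments of the underlying Jacobian class (for example the
# `alpha` parameter) become keyword arguments of the wrapper, alongside the
# generic nonlin_solve options. This only illustrates the mechanism above,
# it adds no functionality.
#
# >>> import inspect
# >>> 'alpha' in inspect.getargspec(broyden1)[0]
# True
# >>> 'f_tol' in inspect.getargspec(broyden1)[0]
# True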
# Deprecated functions
@np.deprecate
def broyden_generalized(*a, **kw):
"""Use *anderson(..., w0=0)* instead"""
kw.setdefault('w0', 0)
return anderson(*a, **kw)
@np.deprecate
def broyden1_modified(*a, **kw):
"""Use `broyden1` instead"""
return broyden1(*a, **kw)
@np.deprecate
def broyden_modified(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def anderson2(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def broyden3(*a, **kw):
"""Use `broyden2` instead"""
return broyden2(*a, **kw)
@np.deprecate
def vackar(*a, **kw):
"""Use `diagbroyden` instead"""
return diagbroyden(*a, **kw)
| gpl-3.0 |
cl4rke/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
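# The assertions above assume the standard svmlight / libsvm text format,
# one sample per line: "<label> <index>:<value> ...". A minimal in-memory
# sketch with made-up values (not taken from the test data files):
#
# >>> from io import BytesIO
# >>> Xs, ys = load_svmlight_file(BytesIO(b"1 2:2.5 10:-5.2\n2 5:1.0\n"),
# ...                             zero_based=True)
# >>> Xs.shape, list(ys)
# ((2, 11), [1.0, 2.0])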
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
jmontoyam/mne-python | mne/decoding/csp.py | 3 | 30425 | # -*- coding: utf-8 -*-
# Authors: Romain Trachel <trachelr@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Alexandre Barachant <alexandre.barachant@gmail.com>
# Clemens Brunner <clemens.brunner@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from scipy import linalg
from .mixin import TransformerMixin
from .base import BaseEstimator
from ..cov import _regularized_covariance
from ..utils import warn
class CSP(TransformerMixin, BaseEstimator):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
CSP in the context of EEG was first described in [1]; a comprehensive
    tutorial on CSP can be found in [2]. The multiclass case is solved
    following [3].
Parameters
----------
n_components : int, defaults to 4
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float | str | None, defaults to None
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : None | bool, defaults to None
If transform_into == 'average_power' and log is None or True, then
applies a log transform to standardize the features, else the features
are z-scored. If transform_into == 'csp_space', then log must be None.
cov_est : 'concat' | 'epoch', defaults to 'concat'
If 'concat', covariance matrices are estimated on concatenated epochs
for each class.
If 'epoch', covariance matrices are estimated on each epoch separately
and then averaged over each class.
transform_into : {'average_power', 'csp_space'}
If 'average_power' then self.transform will return the average power of
each spatial filter. If 'csp_space' self.transform will return the data
in CSP space. Defaults to 'average_power'.
Attributes
----------
``filters_`` : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
``patterns_`` : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
``mean_`` : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
``std_`` : ndarray, shape (n_components,)
If fit, the std squared power for each component.
References
----------
[1] Zoltan J. Koles, Michael S. Lazar, Steven Z. Zhou. Spatial Patterns
Underlying Population Differences in the Background EEG. Brain
Topography 2(4), 275-284, 1990.
[2] Benjamin Blankertz, Ryota Tomioka, Steven Lemm, Motoaki Kawanabe,
Klaus-Robert Müller. Optimizing Spatial Filters for Robust EEG
Single-Trial Analysis. IEEE Signal Processing Magazine 25(1), 41-56,
2008.
[3] Grosse-Wentrup, Moritz, and Martin Buss. Multiclass common spatial
patterns and information theoretic feature extraction. IEEE
Transactions on Biomedical Engineering, Vol 55, no. 8, 2008.
"""
def __init__(self, n_components=4, reg=None, log=None, cov_est="concat",
transform_into='average_power'):
"""Init of CSP."""
# Init default CSP
if not isinstance(n_components, int):
raise ValueError('n_components must be an integer.')
self.n_components = n_components
# Init default regularization
if (
(reg is not None) and
(reg not in ['oas', 'ledoit_wolf']) and
((not isinstance(reg, (float, int))) or
(not ((reg <= 1.) and (reg >= 0.))))
):
raise ValueError('reg must be None, "oas", "ledoit_wolf" or a '
'float in between 0. and 1.')
self.reg = reg
# Init default cov_est
if not (cov_est == "concat" or cov_est == "epoch"):
raise ValueError("unknown covariance estimation method")
self.cov_est = cov_est
# Init default transform_into
if transform_into not in ('average_power', 'csp_space'):
raise ValueError('transform_into must be "average_power" or '
'"csp_space".')
self.transform_into = transform_into
# Init default log
if transform_into == 'average_power':
if log is not None and not isinstance(log, bool):
raise ValueError('log must be a boolean if transform_into == '
'"average_power".')
else:
if log is not None:
raise ValueError('log must be a None if transform_into == '
'"csp_space".')
self.log = log
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def fit(self, X, y, epochs_data=None):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the CSP.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
X = _check_deprecate(epochs_data, X)
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)."
% type(X))
self._check_Xy(X, y)
n_channels = X.shape[1]
self._classes = np.unique(y)
n_classes = len(self._classes)
if n_classes < 2:
raise ValueError("n_classes must be >= 2.")
covs = np.zeros((n_classes, n_channels, n_channels))
sample_weights = list()
for class_idx, this_class in enumerate(self._classes):
if self.cov_est == "concat": # concatenate epochs
class_ = np.transpose(X[y == this_class], [1, 0, 2])
class_ = class_.reshape(n_channels, -1)
cov = _regularized_covariance(class_, reg=self.reg)
weight = sum(y == this_class)
elif self.cov_est == "epoch":
class_ = X[y == this_class]
cov = np.zeros((n_channels, n_channels))
for this_X in class_:
cov += _regularized_covariance(this_X, reg=self.reg)
cov /= len(class_)
weight = len(class_)
# normalize by trace and stack
covs[class_idx] = cov / np.trace(cov)
sample_weights.append(weight)
if n_classes == 2:
eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0))
# sort eigenvectors
ix = np.argsort(np.abs(eigen_values - 0.5))[::-1]
else:
# The multiclass case is adapted from
# http://github.com/alexandrebarachant/pyRiemann
eigen_vectors, D = _ajd_pham(covs)
# Here we apply an euclidean mean. See pyRiemann for other metrics
mean_cov = np.average(covs, axis=0, weights=sample_weights)
eigen_vectors = eigen_vectors.T
# normalize
for ii in range(eigen_vectors.shape[1]):
tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov),
eigen_vectors[:, ii])
eigen_vectors[:, ii] /= np.sqrt(tmp)
# class probability
class_probas = [np.mean(y == _class) for _class in self._classes]
# mutual information
mutual_info = []
for jj in range(eigen_vectors.shape[1]):
aa, bb = 0, 0
for (cov, prob) in zip(covs, class_probas):
tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov),
eigen_vectors[:, jj])
aa += prob * np.log(np.sqrt(tmp))
bb += prob * (tmp ** 2 - 1)
mi = - (aa + (3.0 / 16) * (bb ** 2))
mutual_info.append(mi)
ix = np.argsort(mutual_info)[::-1]
# sort eigenvectors
eigen_vectors = eigen_vectors[:, ix]
self.filters_ = eigen_vectors.T
self.patterns_ = linalg.pinv(eigen_vectors)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X, epochs_data=None):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
If self.transform_into == 'average_power' then returns the power of
CSP features averaged over time and shape (n_epochs, n_sources)
If self.transform_into == 'csp_space' then returns the data in CSP
space and shape is (n_epochs, n_sources, n_times)
"""
X = _check_deprecate(epochs_data, X)
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)." % type(X))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
if self.transform_into == 'average_power':
X = (X ** 2).mean(axis=-1)
log = True if self.log is None else self.log
if log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
def plot_patterns(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic patterns of CSP components.
The CSP patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
# the call plot_topomap
return patterns.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
def plot_filters(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
            The CSP filters to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
# the call plot_topomap
return filters.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
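# A minimal usage sketch for the class above; the random two-class epochs
# array and its shape are arbitrary assumptions, a real call would use
# epochs.get_data() and the matching label vector.
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(42)
# >>> X = rng.randn(20, 8, 100)          # (n_epochs, n_channels, n_times)
# >>> y = np.array([0] * 10 + [1] * 10)
# >>> csp = CSP(n_components=4, log=True)
# >>> csp.fit(X, y).transform(X).shape
# (20, 4)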
def _ajd_pham(X, eps=1e-6, max_iter=15):
"""Approximate joint diagonalization based on Pham's algorithm.
This is a direct implementation of the PHAM's AJD algorithm [1].
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_channels)
A set of covariance matrices to diagonalize.
eps : float, defaults to 1e-6
        The tolerance for the stopping criterion.
    max_iter : int, defaults to 15
        The maximum number of iterations to reach convergence.
Returns
-------
V : ndarray, shape (n_channels, n_channels)
The diagonalizer.
D : ndarray, shape (n_epochs, n_channels, n_channels)
The set of quasi diagonal matrices.
References
----------
[1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive
definite Hermitian matrices." SIAM Journal on Matrix Analysis and
Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(max_iter):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
    D = np.reshape(A, (n_times, n_m // n_times, n_times)).transpose(1, 0, 2)
return V, D
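# A small synthetic check of the diagonalizer: covariance matrices sharing a
# common mixing matrix are (approximately) jointly diagonalized by V. The
# matrices below are arbitrary assumptions built only for this illustration.
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> A = rng.randn(3, 3)                          # common mixing matrix
# >>> covs = np.array([A.dot(np.diag(rng.rand(3) + .1)).dot(A.T)
# ...                  for _ in range(4)])
# >>> V, D = _ajd_pham(covs)
# >>> VCV = V.dot(covs[0]).dot(V.T)
# The off-diagonal entries of VCV are now negligible compared to its diagonal.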
def _check_deprecate(epochs_data, X):
"""Aux. function to CSP to deal with the change param name."""
if epochs_data is not None:
X = epochs_data
warn('epochs_data will be deprecated in mne 0.14. Use X instead')
return X
| bsd-3-clause |
walterreade/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
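# The accuracy computed above is simply the mean overlap between the
# approximate and exact neighbor index sets, averaged over queries. With
# made-up index arrays (an assumption for illustration only):
#
# >>> import numpy as np
# >>> exact = np.array([3, 7, 1]); approx = np.array([3, 1, 9])
# >>> np.in1d(approx, exact).mean()   # -> 2/3: two of three hits are exact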
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
hsuantien/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed and performs better than the One-Class SVM
  in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
spallavolu/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
tagomatech/ETL | eia/eia.py | 1 | 1896 | # """ eia.py """
import numpy as np
import pandas as pd
import requests
import json
class EIA(object):
def __init__(self, token: str, series_id: str):
"""Fetch data from www.eia.gov
Attributes
----------
token: str
EIA token
series_id : str
series id
Methods
-------
getData(token: str, series_id: str)
Fetch data from website and return a pd.Series()
Examples
-------
# Get daily prices of natural gas, series id NG.RNGC1.D
# http://www.eia.gov/beta/api/qb.cfm?category=462457&sdid=NG.RNGC1.D
token = 'YOUR_EIA_TOKEN'
nat_gas = 'NG.RNGC1.D'
eia = EIA(token, nat_gas)
print(eia.getData())
"""
self.token = token
self.series_id = series_id
def getData(self) -> pd.Series:
# URL
url = 'http://api.eia.gov/series/?api_key={}&series_id={}'.format(self.token, self.series_id.upper())
# Fetch data
try:
r = requests.get(url)
jso = r.json()
dic = jso['series'][0]['data']
# Create series object
lst_dates = np.column_stack(dic)[0]
lst_values = np.column_stack(dic)[1]
data = pd.Series(data=lst_values,
index=lst_dates)
# Ensure timestamp format consistency across time frequencies
if len(data.index[0]) == 4:
data.index = [x + '0101' for x in data.index]
if len(data.index[0]) == 6:
data.index = [x + '01' for x in data.index]
data.index = pd.to_datetime(data.index, format='%Y%m%d')
data.name = self.series_id
return data
# Except anything
except Exception as e:
print(e)
| mit |
fuadcan/complexBI | 1_newscrawler/newscrawler/spiders/peopleschn.py | 1 | 4033 | from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from ..items import NewsItem
from datetime import datetime
import pandas as pd
import re
class PeoplesChinaSpider(CrawlSpider):
name = "peopleschina"
allowed_domains = ["people.cn"]
def __init__(self, yearmonth='', *args, **kwargs):
super(PeoplesChinaSpider, self).__init__(*args, **kwargs)
begin_date = pd.Timestamp(yearmonth + "-01")
end_date = pd.Timestamp(begin_date) + pd.DateOffset(months=1) - pd.DateOffset(days=1)
date_inds = [d.date().isoformat().replace("-","") for d in pd.date_range(begin_date,end_date)]
self.start_urls = ["http://en.people.cn/review/%s.html" % d for d in date_inds]
rules = (
Rule(LinkExtractor(allow=(), restrict_xpaths=('//div[@class="p1_left fl"]//a',
'//div[@class="p1_right fr"]//a',)), callback="parse_items", follow= True),
Rule(LinkExtractor(deny=('//div[@class="ad02 clear"]/a'), restrict_xpaths=()), callback="parse_items", follow= True),
)
def parse_items(self, response):
hxs = HtmlXPathSelector(response)
item = NewsItem()
item["link"] = response.request.url
item["lang"] = "en"
item["source"] = "PeoplesChina"
title = hxs.xpath('//span[@id="p_title"]/text()').extract()
if not title:
title = hxs.xpath('//div[@class="w980 wb_10 clear"]/h1//text()').extract()
if not title:
title = hxs.xpath('//div[@id="p_title"]//text()').extract()
intro = ""
author = hxs.xpath('//div[@class="wb_1 clear"]//text()').extract()
if not author:
author = hxs.xpath('//div[@class="w980 wb_10 clear"]/div//text()').extract()
category = hxs.xpath('//div[@class="w980 wbnav wbnav2 clear"]//a//text()').extract()
if not category:
category = hxs.xpath('//div[@id="p_navigator"]//text()').extract()
new_content = hxs.xpath('//div[@id="p_content"]/p//text()').extract()
if not new_content:
new_content = hxs.xpath('//div[@class="wb_12 clear"]/p//text()').extract()
if not new_content or len("".join(new_content))<10:
new_content = hxs.xpath('//span[@id="p_content"]/text()').extract()
if not new_content or len("".join(new_content))<10:
new_content = hxs.xpath('//div[@id="p_content"]//text()').extract()
if not new_content or len("".join(new_content))<10:
new_content = hxs.xpath('//font[@class="fbody"]//text()').extract()
author = "".join(author)
date_time = author
if not date_time or len(date_time)<2:
date_time = hxs.xpath('//div[@id="p_publishtime"]//text()').extract()
date_time = "".join(date_time)
if not date_time or len(date_time)<2:
date_time = hxs.xpath('//span[@id="p_publishtime"]//text()').extract()
date_time = "".join(date_time)
        print(date_time)
if not author:
author = hxs.xpath('//h3[@class="wb_2 clear"]//text()').extract()
author = "".join(author)
#
# Processing outputs
author = re.findall('^.*[)]',author)
author = [re.sub('^By\s','',a) for a in author]
new_content = [p for p in new_content if not re.search(u'\u2022',p)]
new_content = [p for p in new_content if not re.search('font-family|background-color:',p)]
new_content = ' '.join(new_content)
new_content = re.sub('\n','',new_content)
item["content"] = re.sub('\s{2,}',' ',new_content)
author = re.sub("[)()]","",'|'.join(author))
item["category"] = '|'.join(category)
item["intro"] = ""
item["title"] = ' '.join(title)
date_time = re.findall('[0-9]+:[0-9]{2}.*',date_time)
item["date_time"] = ''.join(date_time)
item["author"] = author
return(item)
| apache-2.0 |
Featuretools/featuretools | featuretools/demo/mock_customer.py | 1 | 4323 | import pandas as pd
from numpy import random
from numpy.random import choice
import featuretools as ft
from featuretools.variable_types import Categorical, ZIPCode
def load_mock_customer(n_customers=5, n_products=5, n_sessions=35, n_transactions=500,
random_seed=0, return_single_table=False, return_entityset=False):
"""Return dataframes of mock customer data"""
random.seed(random_seed)
last_date = pd.to_datetime('12/31/2013')
first_date = pd.to_datetime('1/1/2008')
first_bday = pd.to_datetime('1/1/1970')
join_dates = [random.uniform(0, 1) * (last_date - first_date) + first_date
for _ in range(n_customers)]
birth_dates = [random.uniform(0, 1) * (first_date - first_bday) + first_bday
for _ in range(n_customers)]
customers_df = pd.DataFrame({"customer_id": range(1, n_customers + 1)})
customers_df["zip_code"] = choice(["60091", "13244"], n_customers,)
customers_df["join_date"] = pd.Series(join_dates).dt.round('1s')
customers_df["date_of_birth"] = pd.Series(birth_dates).dt.round('1d')
products_df = pd.DataFrame({"product_id": pd.Categorical(range(1, n_products + 1))})
products_df["brand"] = choice(["A", "B", "C"], n_products)
sessions_df = pd.DataFrame({"session_id": range(1, n_sessions + 1)})
sessions_df["customer_id"] = choice(customers_df["customer_id"], n_sessions)
sessions_df["device"] = choice(["desktop", "mobile", "tablet"], n_sessions)
transactions_df = pd.DataFrame({"transaction_id": range(1, n_transactions + 1)})
transactions_df["session_id"] = choice(sessions_df["session_id"], n_transactions)
transactions_df = transactions_df.sort_values("session_id").reset_index(drop=True)
transactions_df["transaction_time"] = pd.date_range('1/1/2014', periods=n_transactions, freq='65s') # todo make these less regular
transactions_df["product_id"] = pd.Categorical(choice(products_df["product_id"], n_transactions))
transactions_df["amount"] = random.randint(500, 15000, n_transactions) / 100
# calculate and merge in session start
# based on the times we came up with for transactions
session_starts = transactions_df.drop_duplicates("session_id")[["session_id", "transaction_time"]].rename(columns={"transaction_time": "session_start"})
sessions_df = sessions_df.merge(session_starts)
if return_single_table:
return transactions_df.merge(sessions_df).merge(customers_df).merge(products_df).reset_index(drop=True)
elif return_entityset:
es = ft.EntitySet(id="transactions")
es = es.entity_from_dataframe(entity_id="transactions",
dataframe=transactions_df,
index="transaction_id",
time_index="transaction_time",
variable_types={"product_id": Categorical})
es = es.entity_from_dataframe(entity_id="products",
dataframe=products_df,
index="product_id")
es = es.entity_from_dataframe(entity_id="sessions",
dataframe=sessions_df,
index="session_id",
time_index="session_start")
es = es.entity_from_dataframe(entity_id="customers",
dataframe=customers_df,
index="customer_id",
time_index="join_date",
variable_types={"zip_code": ZIPCode})
rels = [ft.Relationship(es["products"]["product_id"],
es["transactions"]["product_id"]),
ft.Relationship(es["sessions"]["session_id"],
es["transactions"]["session_id"]),
ft.Relationship(es["customers"]["customer_id"],
es["sessions"]["customer_id"])]
es = es.add_relationships(rels)
es.add_last_time_indexes()
return es
return {"customers": customers_df,
"sessions": sessions_df,
"transactions": transactions_df,
"products": products_df}
| bsd-3-clause |
Diyago/Machine-Learning-scripts | DEEP LEARNING/Kaggle Avito Demand Prediction Challenge/text embeddings.py | 1 | 2308 | # @Kmike's code
# https://github.com/deepmipt/DeepPavlov/blob/a59703de60deda349fc39918a1fc1b242638b7f7/pretrained-vectors.md
from tqdm import tqdm
import numpy as np # linear algebra
import pandas as pd
import nltk  # needed below by text2features (nltk.word_tokenize)
# read embedding
def embeding_reading(path):
embeddings_index = {}
f = open(path)
for line in f:
values = line.split(" ")[:-1]
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
return embeddings_index
def text2features(embeddings_index, text):
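    # mean-pool the fastText vectors of the tokens; tokens missing from the embedding
    # are skipped, and a 300-dim zero vector is returned when nothing could be matched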
vec_stack = []
for w in nltk.word_tokenize(text.lower()):
v = embeddings_index.get(w, None)
if v is not None:
vec_stack.append(v)
if len(vec_stack) != 0:
v_mean = np.mean(vec_stack, axis=0)
else:
v_mean = np.zeros(300)
return v_mean
def df_to_embed_features(df, column, embeddings_index):
embed_size = 300
X = np.zeros((df.shape[0], embed_size), dtype="float32")
for i, text in tqdm(enumerate(df[column])):
X[i] = text2features(embeddings_index, text)
return X
path = "/mnt/nvme/jupyter/avito/embeding/ft_native_300_ru_wiki_lenta_lower_case.vec"
embeddings_index = embeding_reading(path)
X = df_to_embed_features(test_df, column="title", embeddings_index=embeddings_index)
# 2nd approach: @artgor's approach
from gensim.models import FastText  # assumed origin of the FastText loader used below
def load_emb(embedding_path, tokenizer, max_features, default=False, embed_size=300):
"""Load embeddings."""
fasttext_model = FastText.load(embedding_path)
word_index = tokenizer.word_index
# my pretrained embeddings have different index, so need to add offset.
if default:
nb_words = min(max_features, len(word_index))
else:
nb_words = min(max_features, len(word_index)) + 2
embedding_matrix = np.zeros((nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features:
continue
try:
embedding_vector = fasttext_model[word]
except KeyError:
embedding_vector = None
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
embedding_matrix = nn_functions.load_emb(
"f:/Avito/embeddings/avito_big_150m_sg1.w2v", tokenizer, max_features, embed_size
)
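# Follow-up sketch (an addition, not from the original script): the matrix built above is
# typically frozen into an Embedding layer; the import path assumes standalone Keras.
from keras.layers import Embedding
emb_layer = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],
                      weights=[embedding_matrix], trainable=False)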
| apache-2.0 |
jueqingsizhe66/image_registration | image_registration/tests/measure_expected_offsets.py | 3 | 8628 | from image_registration.cross_correlation_shifts import cross_correlation_shifts
from image_registration.register_images import register_images
from image_registration import chi2_shifts
from image_registration.fft_tools import dftups,upsample_image,shift,smooth
from image_registration.tests import registration_testing as rt
import numpy as np
import matplotlib.pyplot as pl
import time
from functools import wraps
def print_timing(func):
@wraps(func)
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
print '%s took %0.5g s' % (func.func_name, (t2-t1))
return res
return wrapper
def test_measure_offsets(xsh, ysh, imsize, noise_taper=False, noise=0.5, chi2_shift=chi2_shifts.chi2_shift):
image = rt.make_extended(imsize)
offset_image = rt.make_offset_extended(image, xsh, ysh, noise_taper=noise_taper, noise=noise)
if noise_taper:
noise = noise/rt.edge_weight(imsize)
else:
noise = noise
return chi2_shift(image,offset_image,noise,return_error=True,upsample_factor='auto')
@print_timing
def montecarlo_test_offsets(xsh, ysh, imsize, noise_taper=False, noise=0.5, nsamples=100, **kwargs):
results = [test_measure_offsets(xsh, ysh, imsize, noise_taper=noise_taper,
noise=noise, **kwargs)
for ii in xrange(nsamples)]
xoff,yoff,exoff,eyoff = zip(*results)
return xoff,yoff,exoff,eyoff
def plot_montecarlo_test_offsets(xsh, ysh, imsize, noise=0.5, name="", **kwargs):
xoff,yoff,exoff,eyoff = montecarlo_test_offsets(xsh,ysh,imsize,noise=noise,**kwargs)
pl.plot(xsh,ysh,'k+')
means = [np.mean(x) for x in (xoff,yoff,exoff,eyoff)]
stds = [np.std(x) for x in (xoff,yoff,exoff,eyoff)]
pl.plot(xoff,yoff,',',label=name)
pl.errorbar(means[0],means[1],xerr=stds[0],yerr=stds[1],label=name+"$\mu+\sigma$")
    pl.errorbar(means[0],means[1],xerr=means[2],yerr=means[3],label=name+"$\mu+\mu(\sigma)$")
#pl.legend(loc='best')
return xoff,yoff,exoff,eyoff,means,stds
def montecarlo_tests_of_imsize(xsh,ysh,imsizerange=[15,105,5],noise=0.5,
figstart=0, clear=True, namepre="", **kwargs):
"""
Perform many monte-carlo tests as a function of the image size
"""
pl.figure(figstart+1)
if clear: pl.clf()
means_of_imsize = []
stds_of_imsize = []
for imsize in xrange(*imsizerange):
print "Image Size = %i. " % imsize,
xoff,yoff,exoff,eyoff,means,stds = plot_montecarlo_test_offsets(xsh,
ysh, imsize, noise=noise, name=namepre+"%i "%imsize, **kwargs)
means_of_imsize.append(means)
stds_of_imsize.append(stds)
imsizes = np.arange(*imsizerange)
pl.figure(figstart+2)
if clear: pl.clf()
xmeans,ymeans,exmeans,eymeans = np.array(means_of_imsize).T
xstds,ystds,exstds,eystds = np.array(stds_of_imsize).T
pl.plot(imsizes,exmeans,label='$\\bar{\sigma_{x}}$')
pl.plot(imsizes,eymeans,label='$\\bar{\sigma_{y}}$')
pl.plot(imsizes,xstds,label='${\sigma_{x}(\mu)}$')
pl.plot(imsizes,ystds,label='${\sigma_{y}(\mu)}$')
pl.xlabel("Image Sizes")
pl.ylabel("X and Y errors")
pl.title("Noise Level = %f" % noise)
pl.figure(figstart+3)
if clear: pl.clf()
pl.plot(imsizes,exmeans/xstds,label='$\\bar{\sigma_{x}} / \sigma_{x}(\mu)$')
pl.plot(imsizes,eymeans/ystds,label='$\\bar{\sigma_{y}} / \sigma_{y}(\mu)$')
pl.xlabel("Image Sizes")
pl.ylabel("Ratio of measured to monte-carlo error")
pl.title("Noise Level = %f" % noise)
print "Ratio mean measure X error to monte-carlo X standard dev: ", np.mean(exmeans/xstds)
print "Ratio mean measure Y error to monte-carlo Y standard dev: ", np.mean(eymeans/ystds)
return np.array(means_of_imsize).T,np.array(stds_of_imsize).T
def monte_carlo_tests_of_noiselevel(xsh,ysh,noiselevels,imsize=25, figstart=0, clear=True,
namepre="", **kwargs):
pl.figure(figstart+1)
if clear: pl.clf()
means_of_noise = []
stds_of_noise = []
for noise in noiselevels:
print "Noise Level = %f. " % noise,
xoff,yoff,exoff,eyoff,means,stds = plot_montecarlo_test_offsets(xsh,
ysh, imsize, noise=noise, name=namepre+"%0.2f "%noise, **kwargs)
means_of_noise.append(means)
stds_of_noise.append(stds)
noises = noiselevels
pl.figure(figstart+2)
if clear: pl.clf()
xmeans,ymeans,exmeans,eymeans = np.array(means_of_noise).T
xstds,ystds,exstds,eystds = np.array(stds_of_noise).T
pl.plot(noises,exmeans,label='$\\bar{\sigma_{x}}$')
pl.plot(noises,eymeans,label='$\\bar{\sigma_{y}}$')
pl.plot(noises,xstds,label='${\sigma_{x}(\mu)}$')
pl.plot(noises,ystds,label='${\sigma_{y}(\mu)}$')
pl.xlabel("Noise Levels")
pl.ylabel("X and Y errors")
pl.title("Image Size = %i" % imsize)
pl.figure(figstart+3)
if clear: pl.clf()
pl.plot(noises,exmeans/xstds,label='$\\bar{\sigma_{x}} / \sigma_{x}(\mu)$')
pl.plot(noises,eymeans/ystds,label='$\\bar{\sigma_{y}} / \sigma_{y}(\mu)$')
pl.xlabel("Noise Levels")
pl.ylabel("Ratio of measured to monte-carlo error")
pl.title("Image Size = %i" % imsize)
print "Ratio mean measure X error to monte-carlo X standard dev: ", np.mean(exmeans/xstds)
print "Ratio mean measure Y error to monte-carlo Y standard dev: ", np.mean(eymeans/ystds)
return np.array(means_of_noise).T,np.array(stds_of_noise).T
def centers_to_edges(arr):
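    # e.g. evenly spaced bin centers [1, 2, 3] become edges [0.5, 1.5, 2.5, 3.5],
    # which is the form pcolormesh expects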
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr
def monte_carlo_tests_of_both(xsh,ysh,noiselevels, imsizes, figstart=12,
clear=True, namepre="", **kwargs):
pl.figure(figstart+1)
pl.clf()
pars = [[plot_montecarlo_test_offsets(xsh, ysh, imsize, noise=noise,
name=namepre+"%0.2f "%noise, **kwargs)
for noise in noiselevels]
for imsize in imsizes]
means = np.array([[p[4] for p in a] for a in pars])
stds = np.array([[p[5] for p in a] for a in pars])
pl.subplot(221)
pl.title("$\sigma_x$ means")
pl.pcolormesh(centers_to_edges(noiselevels),centers_to_edges(imsizes),means[:,:,2])
pl.subplot(222)
pl.title("$\sigma_y$ means")
pl.pcolormesh(centers_to_edges(noiselevels),centers_to_edges(imsizes),means[:,:,3])
pl.subplot(223)
pl.title("$\mu_x$ stds")
pl.pcolormesh(centers_to_edges(noiselevels),centers_to_edges(imsizes),stds[:,:,0])
pl.subplot(224)
pl.title("$\mu_y$ stds")
pl.pcolormesh(centers_to_edges(noiselevels),centers_to_edges(imsizes),stds[:,:,1])
for ii in xrange(1,5):
pl.subplot(2,2,ii)
pl.xlabel("Noise Level")
pl.ylabel("Image Size")
return np.array(means),np.array(stds)
def perform_tests(nsamples=100):
moi,soi = montecarlo_tests_of_imsize(3.7, -1.2, figstart=0,
chi2_shift=chi2_shifts.chi2_shift,
imsizerange=[15, 105, 5],nsamples=nsamples)
moiB,soiB = montecarlo_tests_of_imsize(-9.1, 15.9, figstart=0, clear=False,
chi2_shift=chi2_shifts.chi2_shift, imsizerange=[15,105,5],
nsamples=nsamples)
moi2,soi2 = montecarlo_tests_of_imsize(3.7, -1.2, figstart=3,
chi2_shift=chi2_shifts.chi2_shift_iterzoom,
imsizerange=[15, 105, 3],nsamples=nsamples)
moi2B,soi2B = montecarlo_tests_of_imsize(-9.1, 15.9, figstart=3,
clear=False, chi2_shift=chi2_shifts.chi2_shift_iterzoom,
imsizerange=[15,105,3], nsamples=nsamples)
mos,sos = monte_carlo_tests_of_noiselevel(3.7, -1.2, figstart=6,
chi2_shift=chi2_shifts.chi2_shift,
noiselevels=np.logspace(-1,1),nsamples=nsamples)
mosB,sosB = monte_carlo_tests_of_noiselevel(-9.1, 15.9, figstart=6,
clear=False, chi2_shift=chi2_shifts.chi2_shift,
noiselevels=np.logspace(-1,1), nsamples=nsamples)
mos2,sos2 = monte_carlo_tests_of_noiselevel(3.7, -1.2, figstart=9,
chi2_shift=chi2_shifts.chi2_shift_iterzoom,
noiselevels=np.logspace(-1,1), nsamples=nsamples)
mos2B,sos2B = monte_carlo_tests_of_noiselevel(-9.1, 15.9, figstart=9,
clear=False, chi2_shift=chi2_shifts.chi2_shift_iterzoom,
noiselevels=np.logspace(-1,1), nsamples=nsamples)
return locals()
def twod_tests(nsamples=100):
mob,sob = monte_carlo_tests_of_both(3.7, -1.2, np.linspace(0.1,10,10),
np.arange(15,105,5), figstart=12, clear=True, namepre="",
chi2_shift=chi2_shifts.chi2_shift_iterzoom, nsamples=nsamples)
| mit |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 37 | 26758 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
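    # stepping through the flattened array with stride M+1 moves one row down and one
    # column to the right at each step, so starting at offset i fills the k-th diagonal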
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
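    # the outer >= comparison is True at (i, j) exactly when i >= j - k, i.e. on and
    # below the k-th diagonal, which is the lower-triangular mask being built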
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
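        # a running product along each row turns the broadcast column x into
        # x, x**2, ..., x**(N-1)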
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
btabibian/scikit-learn | sklearn/utils/graph.py | 7 | 3094 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
from .deprecation import deprecated
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> list(sorted(single_source_shortest_path_length(graph, 0).items()))
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> graph = np.ones((6, 6))
>>> list(sorted(single_source_shortest_path_length(graph, 2).items()))
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
@deprecated("sklearn.utils.graph.connected_components was deprecated in "
"version 0.19 and will be removed in 0.21. Use "
"scipy.sparse.csgraph.connected_components instead.")
def connected_components(*args, **kwargs):
return sparse.csgraph.connected_components(*args, **kwargs)
@deprecated("sklearn.utils.graph.graph_laplacian was deprecated in version "
"0.19 and will be removed in 0.21. Use "
"scipy.sparse.csgraph.laplacian instead.")
def graph_laplacian(*args, **kwargs):
return sparse.csgraph.laplacian(*args, **kwargs)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_source_label_time_frequency.py | 9 | 3756 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and the inter-trial coherence
power, itc = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
itc = np.mean(itc, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(itc,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('ITC (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
tjmassin/gwdetchar | gwdetchar/lasso/tests/test_core.py | 3 | 3032 | # -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gwdetchar. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwdetchar.lasso`
"""
import numpy
from numpy import testing as nptest
from gwpy.timeseries import (TimeSeries, TimeSeriesDict)
from .. import core
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
# global test objects
IND = 8
OUTLIER_IN = numpy.random.normal(loc=0, scale=1, size=1024)
OUTLIER_IN[IND] = 100
OUTLIER_TS = TimeSeries(OUTLIER_IN, sample_rate=1024, unit='Mpc',
name='X1:TEST_RANGE')
TARGET = numpy.array([-1, 0, 1])
SERIES = TimeSeries(TARGET, sample_rate=1, epoch=-1)
DATA = numpy.array([SERIES.value]).T
TSDICT = TimeSeriesDict({
'full': SERIES,
'flat': TimeSeries(numpy.ones(3), sample_rate=1, epoch=0),
'nan': TimeSeries(numpy.full(3, numpy.nan), sample_rate=1, epoch=0),
})
# -- unit tests ---------------------------------------------------------------
def test_find_outliers():
# find expected outliers
outliers = core.find_outliers(OUTLIER_TS)
assert isinstance(outliers, numpy.ndarray)
nptest.assert_array_equal(outliers, numpy.array([IND]))
def test_remove_outliers():
# strip off outliers
core.remove_outliers(OUTLIER_TS)
assert OUTLIER_TS[IND] - OUTLIER_TS.mean() <= 5 * OUTLIER_TS.std()
def test_fit():
# adapted from unit tests for sklearn.linear_model
model = core.fit(DATA, TARGET, alpha=1e-8)
assert model.alpha == 1e-8
nptest.assert_almost_equal(model.coef_, [1])
nptest.assert_almost_equal(model.dual_gap_, 0)
nptest.assert_almost_equal(model.predict([[0], [1]]), [0, 1])
def test_find_alpha():
# find the optimal alpha parameter
alpha = core.find_alpha(DATA, TARGET)
assert alpha == 0.1
def test_remove_flat():
# remove flat TimeSeries
tsdict = core.remove_flat(TSDICT)
assert len(tsdict.keys()) == 2
assert 'flat' not in tsdict.keys()
nptest.assert_array_equal(tsdict['full'].value, TSDICT['full'].value)
nptest.assert_array_equal(tsdict['nan'].value, TSDICT['nan'].value)
def test_remove_bad():
# remove unscalable TimeSeries
tsdict = core.remove_bad(TSDICT)
assert len(tsdict.keys()) == 2
assert 'nan' not in tsdict.keys()
nptest.assert_array_equal(tsdict['full'].value, TSDICT['full'].value)
nptest.assert_array_equal(tsdict['flat'].value, TSDICT['flat'].value)
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/mplot3d/contourf3d_2.py | 1 | 1077 | '''
======================================
Projecting filled contour onto a graph
======================================
Demonstrates displaying a 3D surface while also projecting filled contour
'profiles' onto the 'walls' of the graph.
See contour3d_demo2 for the unfilled version.
'''
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# Plot the 3D surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# Plot projections of the contours for each dimension. By choosing offsets
# that match the appropriate axes limits, the projected contours will sit on
# the 'walls' of the graph
cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/neighbors/classification.py | 15 | 14338 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
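            # for each output column, take the (optionally distance-weighted) majority
            # label among the k neighbors of every query point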
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
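                # add each neighbor's weight to the column of the class it voted for,
                # one row (query sample) at a time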
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
    outlier_label : int, optional (default = None)
        Label assigned to outlier samples (samples with no neighbors within
        the given radius).
        If set to None, a ValueError is raised when such an outlier is
        detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| mit |
jakejhansen/minesweeper_solver | policy_gradients/test_condensed_v4.py | 1 | 8720 | # review solution
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import relu, softmax
import gym
import sys
import os
sys.path.append('../')
from minesweeper_tk import Minesweeper
model = "condensed_6x6_v4"
# training settings
epochs = 100000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.000002 # current learning rate (started at 0.001; see the decay schedule below)
#4400: 56% win --> LR: 0.0003
#5600: 69% win --> LR: 0.0001
#7200: 74% win --> LR: 0.00003
#8400: 77% win --> LR: 0.00001
#9600: 75% win --> LR: 0.000005
#10400: 75% win --> LR: 0.000002
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
""" condensed
epochs = 100000 # number of training batches
batch_size = 400 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00004 # you know this by now #0.0005
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
""" 261 epocs to learn 2 specific board (overfit)
epochs = 10000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 130 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.001 # you know this by now
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""
# setup policy network
n = 6
n_inputs = 6*6*2
n_hidden = 6*6*8
n_hidden2 = 220
n_hidden3 = 220
n_hidden4 = 220
n_outputs = 6*6
dropout = 0
tf.reset_default_graph()
states_pl = tf.placeholder(tf.float32, [None, n_inputs], name='states_pl')
actions_pl = tf.placeholder(tf.int32, [None, 2], name='actions_pl')
advantages_pl = tf.placeholder(tf.float32, [None], name='advantages_pl')
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate_pl')
input_layer = tf.reshape(states_pl, [-1, n, n, 2])
conv1 = tf.layers.conv2d(inputs=input_layer,filters=18,kernel_size=[5, 5],padding="same", activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,filters=36,kernel_size=[3, 3],padding="same", activation=tf.nn.relu)
conv2_flat = tf.contrib.layers.flatten(conv2)
l_hidden = tf.layers.dense(inputs=conv2_flat, units=n_hidden, activation=relu, name='l_hidden')
l_hidden2 = tf.layers.dense(inputs=l_hidden, units=n_hidden2, activation=relu, name='l_hidden2')
l_hidden2 = tf.layers.dropout(l_hidden2, rate=dropout)
l_hidden3 = tf.layers.dense(inputs=l_hidden2, units=n_hidden3, activation=relu, name='l_hidden3')
l_hidden3 = tf.layers.dropout(l_hidden3, rate=dropout)
#l_hidden4 = tf.layers.dense(inputs=l_hidden3, units=n_hidden4, activation=relu, name='l_hidden4')
l_hidden3 = tf.layers.dropout(l_hidden3, rate=dropout)
l_out = tf.layers.dense(inputs=l_hidden3, units=n_outputs, activation=softmax, name='l_out')
# print network
print('states_pl:', states_pl.get_shape())
print('actions_pl:', actions_pl.get_shape())
print('advantages_pl:', advantages_pl.get_shape())
print('l_hidden:', l_hidden.get_shape())
print('l_hidden2:', l_hidden2.get_shape())
print('l_hidden3:', l_hidden3.get_shape())
print('l_out:', l_out.get_shape())
# define loss and optimizer
loss_f = -tf.reduce_mean(tf.multiply(tf.log(tf.gather_nd(l_out, actions_pl)), advantages_pl))
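# (Explanatory note, added for clarity.) loss_f above is the negative
# REINFORCE objective: gather_nd picks, for every visited state, the
# probability the policy assigned to the action actually taken; its log is
# weighted by the standardized advantage, so minimizing loss_f increases the
# likelihood of actions that led to above-average returns.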
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_pl, beta1=0.8, beta2=0.92)
train_f = optimizer.minimize(loss_f)
saver = tf.train.Saver() # we use this later to save the model
# test forward pass
from minesweeper_tk import Minesweeper
env = Minesweeper(display=False, ROWS = 6, COLS = 6, MINES = 7, OUT = "CONDENSED", rewards = {"win" : 1, "loss" : -1, "progress" : 0.9, "noprogress" : -0.3, "YOLO" : -0.3})
state = env.stateConverter(env.get_state()).flatten()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
action_probabilities = sess.run(fetches=l_out, feed_dict={states_pl: [state]})
print(action_probabilities)
# helper functions
def get_rollout(sess, env, rollout_limit=None, stochastic=False, seed=None):
"""Generate rollout by iteratively evaluating the current policy on the environment."""
rollout_limit = rollout_limit
env.reset()
s = env.stateConverter(env.get_state()).flatten()
states, actions, rewards = [], [], []
for i in range(rollout_limit):
a = get_action(sess, s, stochastic)
s1, r, done, _ = env.step(a)
states.append(s)
actions.append(a)
rewards.append(r)
s = s1
if done: break
return states, actions, rewards, i+1
def get_action(sess, state, stochastic=False):
"""Choose an action, given a state, with the current policy network."""
# get action probabilities
a_prob = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
#valid_moves = env.get_validMoves()
#a_prob[~valid_moves.flatten().reshape(1,36)] = 0
#a_prob[a_prob < 0.00001] = 0.000001
#a_prob / np.sum(a_prob)
#a_prob = normalize(a_prob, norm = 'l1')
#if abs(1-np.sum(a_prob)) > 0.01:
# a_prob = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
if stochastic:
# sample action from distribution
return (np.cumsum(np.asarray(a_prob)) > np.random.rand()).argmax()
else:
# select action with highest probability
return a_prob.argmax()
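# Illustrative sketch (added for explanation; ``_example_sample_action`` is a
# hypothetical helper, not used by the script): the stochastic branch above
# samples an index with probability proportional to a_prob by comparing the
# cumulative sum with a single uniform draw. For example, a_prob = [0.2, 0.5,
# 0.3] gives cumsum = [0.2, 0.7, 1.0]; a draw of 0.65 selects index 1, since
# 0.7 is the first cumulative value that exceeds it.
def _example_sample_action(a_prob):
    cdf = np.cumsum(np.asarray(a_prob).ravel())
    return int((cdf > np.random.rand()).argmax())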
def get_advantages(rewards, rollout_limit, discount_factor, eps=1e-12):
"""Compute advantages"""
returns = get_returns(rewards, rollout_limit, discount_factor)
# standardize columns of returns to get advantages
advantages = (returns - np.mean(returns, axis=0)) / (np.std(returns, axis=0) + eps)
# restore original rollout lengths
advantages = [adv[:len(rewards[i])] for i, adv in enumerate(advantages)]
return advantages
def get_returns(rewards, rollout_limit, discount_factor):
"""Compute the cumulative discounted rewards, a.k.a. returns."""
returns = np.zeros((len(rewards), rollout_limit))
for i, r in enumerate(rewards):
returns[i, len(r) - 1] = r[-1]
for j in reversed(range(len(r)-1)):
returns[i,j] = r[j] + discount_factor * returns[i,j+1]
return returns
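# Worked example (added for illustration): with rewards r = [1, 0, 2] and
# discount_factor = 0.9, get_returns computes, from the last step backwards,
#   G_2 = 2
#   G_1 = 0 + 0.9 * 2   = 1.8
#   G_0 = 1 + 0.9 * 1.8 = 2.62
# so get_returns([[1, 0, 2]], 3, 0.9) -> array([[2.62, 1.8, 2.0]]).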
import time
if __name__ == "__main__":
import time
display = False
env = Minesweeper(display=display, ROWS = 6, COLS = 6, MINES = 6, OUT = "CONDENSED", rewards = {"win" : 1, "loss" : -1, "progress" : 0.1, "noprogress" : -0.3, "YOLO": -0.3})
i = 0
#start = time.time()
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, "{}/{}_best.ckpt".format(model,model))
#view = Viewer(env, custom_render=True)
games = 0
moves = 0
stuck = 0
won_games = 0
lost_games = 0
while games < 10000:
if games % 500 == 0:
print(games)
r = 1
r_prev = 1
while True:
if display:
input()
s = env.stateConverter(env.get_state()).flatten()
if r < 0:
a = get_action(sess, s, stochastic=True)
else:
a = get_action(sess, s, stochastic=False)
moves += 1
r_prev = r
s, r, done, _ = env.step(a)
s = s.flatten()
if display:
print("Reward = ", r)
#print("\nReward = {}".format(r))
if r == 1:
won_games += 1
if r == -1:
lost_games += 1
if done:
games += 1
env.reset()
moves = 0
break
elif moves >= 30:
stuck += 1
games += 1
env.lost = env.lost + 1
env.reset()
moves = 0
break
#print(env.lost)
#print(env.won)
print("games: {}, won: {}, lost: {}, stuck: {}, win_rate : {:.1f}%".format(games, won_games, lost_games, stuck, won_games/games * 100))
#view.render(close=True, display_gif=True)
#69% win-rate for 54000 epochs
| mit |
zhujianwei31415/dcnnfold | scripts/evaluation/plot_specificity_sensitivity.py | 2 | 2037 | #!/usr/bin/env python
import sys
if len(sys.argv) != 3:
sys.exit('%s <results_dir> <level(family, superfamily, fold)>' % sys.argv[0])
results_dir = sys.argv[1]
level = sys.argv[2]
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# read in score function
def read_spec_sens(score_file):
spec, sens = [], []
with open(score_file, 'r') as fin:
for line in fin:
cols = line.split()
spec.append(float(cols[0]))
sens.append(float(cols[1]))
return np.array(spec), np.array(sens)
# read in score
# setup colors
colors = ['red', 'blue', 'green', 'gold', 'maroon', 'purple', 'black', 'c', 'm']
names = ['DeepFRpro', 'DeepFR', 'RFDN-Fold', 'DN-Fold', 'DN-FoldS', 'RF-Fold', 'FOLDpro', 'HMMER', 'THREADER']
files = ['%s/%s/spec-sens-%s' % (results_dir, i, level) for i in names]
# Number of methods to compare
n_classes = len(files)
# Read the specificity-sensitivity values for each method
precision = dict()
recall = dict()
for i in range(n_classes):
precision[i], recall[i] = read_spec_sens(files[i])
## filter by threshold
#threshold=0.01
#for i in range(n_classes):
# precision[i][recall[i]<threshold] = 1
# recall[i][recall[i]<threshold] = 0
# Turn interactive plotting off
plt.ioff()
# set figure
plt.figure(figsize=(16, 16), dpi=100)
# Plot the specificity-sensitivity curve for each method
for i, color in zip(range(n_classes), colors):
if i < 2:
plt.plot(precision[i], recall[i], color=color, lw=3, label='{0}'.format(names[i]))
else:
plt.plot(precision[i], recall[i], color=color, lw=2, label='{0}'.format(names[i]))
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
plt.xlim([0, 1.05])
plt.ylim([-0.05, 1])
plt.xlabel('Specificity', fontsize=24)
plt.ylabel('Sensitivity', fontsize=24)
#plt.title('Specificity-Sensitivity curve to multi-class')
plt.legend(loc="upper right", fontsize=16)
plt.grid()
#plt.show()
plt.savefig('%s.eps' % level, format='eps', bbox_inches='tight')
| gpl-3.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/indexes/period/test_formats.py | 15 | 1545 | from pandas import PeriodIndex
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = PeriodIndex(['2017-01-01', '2017-01-02',
'2017-01-03'], freq='D')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype='<U10')
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype='<U10')
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype='<U10')
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = PeriodIndex(['2017-01-01', pd.NaT, '2017-01-03'], freq='D')
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
| gpl-2.0 |
pauldeng/nilmtk | nilmtk/utils.py | 3 | 9850 | from __future__ import print_function, division
import numpy as np
import pandas as pd
import networkx as nx
from copy import deepcopy
from os.path import isdir, dirname, abspath
from os import getcwd
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding, stdout
from IPython.core.display import HTML, display
from collections import OrderedDict
import datetime
from nilmtk.datastore import DataStore, HDFDataStore, CSVDataStore, Key
def show_versions():
"""Prints versions of various dependencies"""
output = OrderedDict()
output["Date"] = str(datetime.datetime.now())
import sys
import platform
output["Platform"] = str(platform.platform())
system_information = sys.version_info
output["System version"] = "{}.{}".format(system_information.major,
system_information.minor)
PACKAGES = ["nilmtk", "nilm_metadata", "numpy", "matplotlib", "pandas", "sklearn"]
for package_name in PACKAGES:
key = package_name + " version"
try:
exec("import " + package_name)
except ImportError:
output[key] = "Not found"
else:
output[key] = eval(package_name + ".__version__")
try:
print(pd.show_versions())
except:
pass
else:
print("")
for k, v in output.iteritems():
print("{}: {}".format(k, v))
def timedelta64_to_secs(timedelta):
"""Convert `timedelta` to seconds.
Parameters
----------
timedelta : np.timedelta64
Returns
-------
float : seconds
"""
if len(timedelta) == 0:
return np.array([])
else:
return timedelta / np.timedelta64(1, 's')
def tree_root(graph):
"""Returns the object that is the root of the tree.
Parameters
----------
graph : networkx.Graph
"""
# from http://stackoverflow.com/a/4123177/732596
assert isinstance(graph, nx.Graph)
roots = [node for node,in_degree in graph.in_degree_iter() if in_degree==0]
n_roots = len(roots)
if n_roots > 1:
raise RuntimeError('Tree has more than one root!')
if n_roots == 0:
raise RuntimeError('Tree has no root!')
return roots[0]
def nodes_adjacent_to_root(graph):
root = tree_root(graph)
return graph.successors(root)
def index_of_column_name(df, name):
for i, col_name in enumerate(df.columns):
if col_name == name:
return i
raise KeyError(name)
def find_nearest(known_array, test_array):
"""Find closest value in `known_array` for each element in `test_array`.
Parameters
----------
known_array : numpy array
consisting of scalar values only; shape: (m, 1)
test_array : numpy array
consisting of scalar values only; shape: (n, 1)
Returns
-------
indices : numpy array; shape: (n, 1)
For each value in `test_array` finds the index of the closest value
in `known_array`.
residuals : numpy array; shape: (n, 1)
For each value in `test_array` finds the difference from the closest
value in `known_array`.
"""
# from http://stackoverflow.com/a/20785149/732596
index_sorted = np.argsort(known_array)
known_array_sorted = known_array[index_sorted]
idx1 = np.searchsorted(known_array_sorted, test_array)
idx2 = np.clip(idx1 - 1, 0, len(known_array_sorted)-1)
idx3 = np.clip(idx1, 0, len(known_array_sorted)-1)
diff1 = known_array_sorted[idx3] - test_array
diff2 = test_array - known_array_sorted[idx2]
indices = index_sorted[np.where(diff1 <= diff2, idx3, idx2)]
residuals = test_array - known_array[indices]
return indices, residuals
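# Example (added for illustration):
#   >>> known = np.array([0, 10, 20])
#   >>> test = np.array([2, 14])
#   >>> find_nearest(known, test)
#   (array([0, 1]), array([2, 4]))
# i.e. 2 is closest to known[0] (residual 2) and 14 is closest to known[1]
# (residual 4).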
def container_to_string(container, sep='_'):
if isinstance(container, str):
string = container
else:
try:
string = sep.join([str(element) for element in container])
except TypeError:
string = str(container)
return string
def simplest_type_for(values):
n_values = len(values)
if n_values == 1:
return list(values)[0]
elif n_values == 0:
return
else:
return tuple(values)
def flatten_2d_list(list2d):
list1d = []
for item in list2d:
if isinstance(item, basestring):
list1d.append(item)
else:
try:
len(item)
except TypeError:
list1d.append(item)
else:
list1d.extend(item)
return list1d
def get_index(data):
"""
Parameters
----------
data : pandas.DataFrame or Series or DatetimeIndex
Returns
-------
index : the index for the DataFrame or Series
"""
if isinstance(data, (pd.DataFrame, pd.Series)):
index = data.index
elif isinstance(data, pd.DatetimeIndex):
index = data
else:
raise TypeError('wrong type for `data`.')
return index
def convert_to_timestamp(t):
"""
Parameters
----------
t : str or pd.Timestamp or datetime or None
Returns
-------
pd.Timestamp or None
"""
return None if t is None else pd.Timestamp(t)
def get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
    if not isdir(path_to_this_file):
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
def dict_to_html(dictionary):
def format_string(value):
try:
if isinstance(value, basestring) and 'http' in value:
html = '<a href="{url}">{url}</a>'.format(url=value)
else:
html = '{}'.format(value)
except UnicodeEncodeError:
html = ''
return html
html = '<ul>'
for key, value in dictionary.iteritems():
html += '<li><strong>{}</strong>: '.format(key)
if isinstance(value, list):
html += '<ul>'
for item in value:
html += '<li>{}</li>'.format(format_string(item))
html += '</ul>'
elif isinstance(value, dict):
html += dict_to_html(value)
else:
html += format_string(value)
html += '</li>'
html += '</ul>'
return html
def print_dict(dictionary):
html = dict_to_html(dictionary)
display(HTML(html))
def offset_alias_to_seconds(alias):
"""Seconds for each period length."""
dr = pd.date_range('00:00', periods=2, freq=alias)
return (dr[-1] - dr[0]).total_seconds()
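# Example (added for illustration): offset_alias_to_seconds('5T') returns
# 300.0, since '5T' denotes a five-minute period.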
def check_directory_exists(d):
if not isdir(d):
raise IOError("Directory '{}' does not exist.".format(d))
def tz_localize_naive(timestamp, tz):
if tz is None:
return timestamp
if timestamp is None or pd.isnull(timestamp):
return pd.NaT
timestamp = pd.Timestamp(timestamp)
if timestamp_is_naive(timestamp):
timestamp = timestamp.tz_localize('UTC')
return timestamp.tz_convert(tz)
def get_tz(df):
index = df.index
try:
tz = index.tz
except AttributeError:
tz = None
return tz
def timestamp_is_naive(timestamp):
"""
Parameters
----------
timestamp : pd.Timestamp or datetime.datetime
Returns
-------
True if `timestamp` is naive (i.e. if it does not have a
timezone associated with it). See:
https://docs.python.org/2/library/datetime.html#available-types
"""
if timestamp.tzinfo is None:
return True
elif timestamp.tzinfo.utcoffset(timestamp) is None:
return True
else:
return False
def get_datastore(filename, format, mode='a'):
"""
Parameters
----------
filename : string
format : 'CSV' or 'HDF'
mode : 'a' (append) or 'w' (write), optional
Returns
-------
    datastore : DataStore
"""
if filename is not None:
if format == 'HDF':
return HDFDataStore(filename, mode)
elif format == 'CSV':
return CSVDataStore(filename)
else:
raise ValueError('format not recognised')
else:
        raise ValueError('filename is None')
def normalise_timestamp(timestamp, freq):
"""Returns the nearest Timestamp to `timestamp` which would be
in the set of timestamps returned by pd.DataFrame.resample(freq=freq)
"""
timestamp = pd.Timestamp(timestamp)
series = pd.Series(np.NaN, index=[timestamp])
resampled = series.resample(freq)
return resampled.index[0]
def print_on_line(*strings):
print(*strings, end="")
stdout.flush()
def append_or_extend_list(lst, value):
if value is None:
return
elif isinstance(value, list):
lst.extend(value)
else:
lst.append(value)
def convert_to_list(list_like):
return [] if list_like is None else list(list_like)
def most_common(lst):
"""Returns the most common entry in lst."""
lst = list(lst)
counts = {item:lst.count(item) for item in set(lst)}
counts = pd.Series(counts)
counts.sort()
most_common = counts.index[-1]
return most_common
def capitalise_first_letter(string):
return string[0].upper() + string[1:]
def capitalise_index(index):
labels = list(index)
for i, label in enumerate(labels):
labels[i] = capitalise_first_letter(label)
return labels
def capitalise_legend(ax):
legend_handles = ax.get_legend_handles_labels()
labels = capitalise_index(legend_handles[1])
ax.legend(legend_handles[0], labels)
return ax
| apache-2.0 |
moreymat/attelo | attelo/cmd/inspect.py | 3 | 1923 | "show properties about models"
from __future__ import print_function
import codecs
import joblib
from ..args import (add_model_read_args)
from ..io import (load_labels, load_vocab)
from ..score import (discriminating_features)
from ..report import (show_discriminating_features)
from ..table import UNKNOWN
from ..util import (Team)
# ---------------------------------------------------------------------
# main
# ---------------------------------------------------------------------
DEFAULT_TOP = 3
'default top number of features to show'
def config_argparser(psr):
"add subcommand arguments to subparser"
add_model_read_args(psr, "{} model to inspect")
psr.add_argument("features", metavar="FILE",
help="sparse features file (just for labels)")
psr.add_argument("vocab", metavar="FILE",
help="feature vocabulary")
psr.add_argument("--top", metavar="N", type=int,
default=DEFAULT_TOP,
help=("show the best N features "
"(default: {})".format(DEFAULT_TOP)))
psr.add_argument("--output", metavar="FILE",
help="output to file")
psr.set_defaults(func=main)
def main(args):
"subcommand main (invoked from outer script)"
models = Team(attach=joblib.load(args.attachment_model),
label=joblib.load(args.relation_model))
# FIXME find a clean way to properly read ready-for-use labels
# upstream ; true labels are 1-based in svmlight format but 0-based
# for sklearn
labels = [UNKNOWN] + load_labels(args.features)
vocab = load_vocab(args.vocab)
discr = discriminating_features(models, labels, vocab, args.top)
res = show_discriminating_features(discr)
if args.output is None:
print(res)
else:
with codecs.open(args.output, 'wb', 'utf-8') as fout:
print(res, file=fout)
| gpl-3.0 |
justincassidy/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
lhilt/scipy | scipy/interpolate/fitpack2.py | 4 | 63081 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, ravel, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
# ############### Univariate spline ####################
_curfit_messages = {1: """
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3: """
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing;
must be strictly increasing if `s` is 0.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("x and y array must not contain "
"NaNs or infs.")
if s is None or s > 0:
if not np.all(diff(x) >= 0.0):
raise ValueError("x must be increasing if s > 0")
else:
if not np.all(diff(x) > 0.0):
raise ValueError("x must be strictly increasing if s = 0")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
xe=bbox[1], s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None, None, None, None, None, k, None, len(t), t,
c, None, None, None, None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
self._eval_args = t[:n], c[:n], k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k, m = data[5], len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in
[8, 9, 11, 12]]
args = data[:8] + (t, c, n, fpint, nrdata, data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k, n = data[5], data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k, n = data[5], data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a, b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d, ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z, m, ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
# if self.ext is 'const', derivative.ext will be 'zeros'
ext = 1 if self.ext == 3 else self.ext
return UnivariateSpline._from_tck(tck, ext=ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
Spline function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be strictly increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("Input must not contain NaNs or infs.")
if not np.all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
xe=bbox[1], s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with a pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing lsq spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
if not np.all(diff(x) >= 0.0):
raise ValueError('x must be increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
# ############### Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
Note that the axis ordering is inverted relative to
the output of meshgrid.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
x = np.asarray(x)
y = np.asarray(y)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
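# Illustrative sketch (added for explanation; exact floating-point output may
# differ slightly): the ``grid`` flag of ``__call__`` above switches between
# evaluation on the outer product of the coordinate arrays and evaluation at
# paired points, e.g. for an interpolating RectBivariateSpline of z = x * y:
#
#     >>> x = y = np.linspace(0, 4, 5)
#     >>> spl = RectBivariateSpline(x, y, x[:, None] * y[None, :])
#     >>> spl([1, 2], [1, 3]).shape        # grid=True (default): 2 x 2 grid
#     (2, 2)
#     >>> spl([1, 2], [1, 3], grid=False)  # pointwise: s(1, 1) and s(2, 3)
#     array([ 1.,  6.])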
_surfit_messages = {1: """
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3: """
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4: """
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5: """
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3: """
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline :
a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
RectSphereBivariateSpline
    SmoothSphereBivariateSpline
LSQSphereBivariateSpline
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx,"
" ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
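# Worked example (added for illustration): for an interpolating spline of
# z = x * y on [0, 4] x [0, 4], ``integral(0, 4, 0, 4)`` should be very close
# to the exact value (4**2 / 2) * (4**2 / 2) = 64.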
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    LSQBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
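    Examples
    --------
    A minimal, illustrative sketch on synthetic scattered data:
    >>> import numpy as np
    >>> from scipy.interpolate import SmoothBivariateSpline
    >>> xg, yg = np.meshgrid(np.linspace(0., 4., 20), np.linspace(0., 4., 20))
    >>> x, y = xg.ravel(), yg.ravel()
    >>> z = np.sin(x) + np.cos(y)              # synthetic test surface
    >>> spl = SmoothBivariateSpline(x, y, z)   # default smoothing factor
    >>> zi = spl(2.0, 3.0)                     # evaluate on a 1x1 grid
    >>> ze = spl.ev(2.0, 3.0)                  # evaluate at a single point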
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
xb, xe, yb,
ye, kx, ky,
s=s, eps=eps,
lwrk2=1)
        if ier > 10:  # lwrk2 was too small, re-run
nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
xb, xe, yb,
ye, kx, ky,
s=s,
eps=eps,
lwrk2=ier)
if ier in [0, -1, -2]: # normal return
pass
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx, ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
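    Examples
    --------
    A minimal, illustrative sketch on synthetic scattered data with a small
    set of interior knots:
    >>> import numpy as np
    >>> from scipy.interpolate import LSQBivariateSpline
    >>> xg, yg = np.meshgrid(np.linspace(0., 4., 25), np.linspace(0., 4., 25))
    >>> x, y = xg.ravel(), yg.ravel()
    >>> z = np.exp(-x) * np.cos(y)               # synthetic test surface
    >>> tx = ty = np.linspace(0.5, 3.5, 5)       # interior knots only
    >>> spl = LSQBivariateSpline(x, y, z, tx, ty)
    >>> zi = spl(1.0, 2.0)                       # evaluate on a 1x1 grid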
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,), float)
ty1 = zeros((ny,), float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb, xe, yb, ye = bbox
tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
xb, xe, yb, ye,
kx, ky, eps, lwrk2=1)
if ier > 10:
tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
xb, xe, yb, ye,
kx, ky, eps, lwrk2=ier)
if ier in [0, -1, -2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
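    Examples
    --------
    A minimal, illustrative sketch on a synthetic regular grid:
    >>> import numpy as np
    >>> from scipy.interpolate import RectBivariateSpline
    >>> x = np.linspace(0., 4., 20)
    >>> y = np.linspace(0., 4., 25)
    >>> z = np.sin(x)[:, None] * np.cos(y)[None, :]   # shape (20, 25)
    >>> interp = RectBivariateSpline(x, y, z)          # s=0: interpolation
    >>> zi = interp(1.5, 2.5)                          # evaluate on a grid
    >>> ze = interp.ev(1.5, 2.5)                       # evaluate at a point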
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not np.all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
if not np.all(diff(y) > 0.0):
raise ValueError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise ValueError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise ValueError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise ValueError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise ValueError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    SmoothSphereBivariateSpline :
        to create a BivariateSpline through the given points
    LSQSphereBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
Determines a smooth bicubic spline according to a given
set of knots in the `theta` and `phi` directions.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message, stacklevel=2)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians. First element (v[0]) must lie
within the interval [-pi, pi). Last element (v[-1]) must satisfy
v[-1] <= v[0] + 2*pi.
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
    be determined by trial and error. It is then best to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise ValueError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise ValueError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise ValueError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise ValueError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.19/_downloads/8e72c04782fa9f41c5841a3f285cad83/plot_decoding_spoc_CMC.py | 4 | 3138 | """
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) [1]_ allows one to identify the composition of
orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from
`Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_
References
----------
.. [1] Dahne, S., et al (2014). SPoC: a novel framework for relating the
amplitude of neuronal oscillations to behaviorally relevant parameters.
NeuroImage, 86, 111-122.
"""
# Author: Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from mne.channels import read_layout
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 250.).load_data() # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft'])
emg.filter(20., None, fir_design='firwin')
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False)
raw.filter(15., 30., fir_design='firwin')
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=.250)
# Epoch length is 1.5 seconds
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
detrend=1, decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross-validation
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
##############################################################################
# Plot the contributions to the detected components (i.e., the forward model)
spoc.fit(X, y)
layout = read_layout('CTF151.lay')
spoc.plot_patterns(meg_epochs.info, layout=layout)
| bsd-3-clause |
r3kall/AnimeRecommenderSystem | animerecommendersystem/recommender_systems/FuzzyClusteringRS.py | 1 | 4263 | from sklearn.neighbors import NearestNeighbors
from animerecommendersystem.utils.utils_functions import sort_list
from collections import defaultdict
STD_NUM_RECOMM = 20
STD_NUM_NEIGHBORS = 7
# Codes used for specifying the way we take recommendations from neighbors
FIRST_USER_FIRST = 0
ITERATIVE = 1
# Constants for vote prediction
MAX_PREDICT_RATE = 10.
MIN_PREDICT_RATE = 1.
class FuzzyCluseringRS:
def __init__(self, users_anime_lists, users_clusters_matrix, users_clusters_dict,
users_clusters_indices, num_neighbors=STD_NUM_NEIGHBORS,
num_recommendations=STD_NUM_RECOMM, how_to=FIRST_USER_FIRST):
self.users_anime_lists = users_anime_lists
self.users_clusters_matrix = users_clusters_matrix
self.users_clusters_dict = users_clusters_dict
self.users_clusters_indices = users_clusters_indices
self.num_neighbors = num_neighbors
self.num_recommendations = num_recommendations
self.how_to = how_to
self.recommendations_list = list()
def get_neighbors(self, user_name):
neigh = NearestNeighbors(n_neighbors=self.num_neighbors+1,
metric='cosine', algorithm='brute', n_jobs=-1)
neigh.fit(self.users_clusters_matrix)
vector = self.users_clusters_dict[user_name]
distances, indices = neigh.kneighbors(vector.reshape(1, -1),
return_distance=True)
nearest_neighbors_dict = defaultdict(float)
for i in range(1, self.num_neighbors+1):
user_index = indices[0][i]
similarity = 1. - distances[0][i]
nearest_neighbors_dict[self.users_clusters_indices[user_index]] = similarity
return nearest_neighbors_dict
def get_recommendations(self, user):
"""
        :param user: Name of the user we want to give suggestions to
        :return: a list of anime that might be interesting to him/her
"""
# Invoke kNN on the matrix to get neighbors
neighbors_dict = self.get_neighbors(user)
predictions_rates_dict = defaultdict(float)
predictions_rates_num_dict = dict()
predictions_rates_den_dict = dict()
user_animes = self.users_anime_lists[user]
for neighbor in neighbors_dict.keys():
neighbor_animes = self.users_anime_lists[neighbor]
for anime in neighbor_animes['list'].keys():
if anime not in user_animes['list'].keys():
neighbor_rate = neighbor_animes['list'][anime]['rate']
if neighbor_rate >= 6:
predictions_rates_num_dict[anime] = predictions_rates_num_dict.get(anime, 0) + \
neighbors_dict[neighbor] * \
(neighbor_rate - self.users_anime_lists[neighbor][
'mean_rate'])
predictions_rates_den_dict[anime] = predictions_rates_den_dict.get(anime, 0) + \
neighbors_dict[neighbor]
for anime in predictions_rates_num_dict.keys():
if predictions_rates_den_dict[anime] == 0:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate']
else:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate'] + \
(float(predictions_rates_num_dict[anime]) / float(
predictions_rates_den_dict[anime]))
if predictions_rates_dict[anime] < MIN_PREDICT_RATE:
predictions_rates_dict[anime] = MIN_PREDICT_RATE
elif predictions_rates_dict[anime] > MAX_PREDICT_RATE:
predictions_rates_dict[anime] = MAX_PREDICT_RATE
sorted_animes = sorted(predictions_rates_dict, key=predictions_rates_dict.get, reverse=True)
results = list()
for anime in sorted_animes[:self.num_recommendations]:
results.append((anime, predictions_rates_dict[anime]))
return results
| gpl-3.0 |
Capstone2017/Machine-Learning-NLP | notebook/timespan_plot.py | 1 | 1983 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 19:16:40 2016
@author: Nero
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
reader = pd.read_csv('application_data.csv', iterator=True)
loop = True
chunkSize = 100000
chunks = []
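# Read the large CSV in fixed-size chunks and concatenate them afterwards,
# so the whole file never has to be parsed in a single pass.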
while loop:
try:
chunkonce = reader.get_chunk(chunkSize)
chunks.append(chunkonce)
'''chunks.append(chunk)'''
except StopIteration:
loop = False
print ('1')
chunk=pd.concat(chunks, ignore_index=True)
a=pd.DataFrame()
a['filing_date']=chunk['filing_date']
a['patent_issue_date']=chunk['patent_issue_date']
a=a[pd.notnull(a['filing_date'])]
a=a[pd.notnull(a['patent_issue_date'])]
timebegins=pd.to_datetime(a['filing_date'], format="%Y-%m-%d")
timeends=pd.to_datetime(a['patent_issue_date'], format="%Y-%m-%d")
timespanyears=timeends-timebegins
timespanyears=timespanyears.to_frame()
timespanyears[0]=timespanyears[0].astype(np.int64)
timespanyears=timespanyears[timespanyears[0]>0]
timespanyears[0]=timespanyears[0]/(1000000000*3600*24*365)
''' Replaced the for loop below: NaN rows are dropped from a and the columns converted to datetime above
timespanyears=[]
for index in range(a.shape[0]):
if(type(a['filing_date'][index])==type('string')
and type(a['patent_issue_date'][index])==type('string')):
filing_dt=datetime.datetime.strptime(a['filing_date'][index],'%Y-%m-%d')
issue_dt=datetime.datetime.strptime(a['patent_issue_date'][index],'%Y-%m-%d')
timespan=issue_dt-filing_dt
x=timespan.total_seconds()/(3600*24*365)
if(x>=0):
timespanyears.append(x)
'''
application_number=[]
invention_subject_matter=[]
disposal_type=[]
'''z=np.histogram(timespanyears,bins=[0,1,2,3,4,5,6,7,8,9,10])'''
plt.hist(timespanyears[0],bins=np.linspace(0,8))
plt.title("Application time span(5,619,688/9,817,693)")
plt.xlabel("Years")
plt.ylabel("Amount")
plt.savefig('spandistribution',dpi=1000)
plt.show()
| mit |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_Rot/Geneva_inst_Rot_6/fullgrid/Rest.py | 30 | 9192 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
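# Concretely, for each model i and emission line j this stores
#   log10(4860 * F_line[i, j] / F_Hbeta[i])
# (column 57 holds the 4860 reference line); non-positive values are left at 0.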
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Rest.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
antoinecarme/pyaf | tests/codegen/test_random_exogenous_code_gen.py | 1 | 1330 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import pyaf.CodeGen.TS_CodeGenerator as tscodegen
import warnings
with warnings.catch_warnings():
warnings.simplefilter("error")
b1 = tsds.generate_random_TS(N = 600 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 12, transform = "", sigma = 0.0, exog_count = 20);
df = b1.mPastData
# this script works on mysql with N = 600, exog_count = 20 when thread_stack = 1920K in
# /etc/mysql/mysql.conf.d/mysqld.cnf
#df.to_csv("outputs/rand_exogenous.csv")
H = b1.mHorizon;
N = df.shape[0];
for n in range(H, N , 10):
df1 = df.head(n).copy();
lEngine = autof.cForecastEngine()
# lEngine.mOptions.mEnableSeasonals = False;
# lEngine.mOptions.mDebugCycles = False;
lEngine
lExogenousData = (b1.mExogenousDataFrame , b1.mExogenousVariables)
lEngine.train(df1 , b1.mTimeVar , b1.mSignalVar, H, lExogenousData);
lEngine.getModelInfo();
lEngine.standardPlots(name = "outputs/my_rand_exog_" + str(n));
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lCodeGenerator = tscodegen.cTimeSeriesCodeGenerator();
lSQL = lCodeGenerator.testGeneration(lEngine);
| bsd-3-clause |
timmie/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8796 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from nose.tools import assert_equal
try:
from nose.tools import assert_raises_regex
except ImportError:
from nose.tools import assert_raises_regexp as assert_raises_regex
from matplotlib.axes import Axes
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert_equal(result, expected)
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert_equal(result, expected)
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert_equal(result, expected)
| gpl-3.0 |
saiwing-yeung/scikit-learn | sklearn/calibration.py | 18 | 19402 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y``
is neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
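    Examples
    --------
    A minimal usage sketch on synthetic data:
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.calibration import CalibratedClassifierCV
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> base_clf = LinearSVC(random_state=0)
    >>> clf = CalibratedClassifierCV(base_clf, method='sigmoid', cv=3)
    >>> clf = clf.fit(X, y)
    >>> proba = clf.predict_proba(X[:5])  # calibrated probabilities, shape (5, 2)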
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
    tiny = np.finfo(np.float64).tiny  # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
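    # For example (illustrative counts, not from any dataset): with prior0 = 90 negatives and
    # prior1 = 10 positives, positive targets become (10 + 1) / (10 + 2) ~= 0.917 and negative
    # targets 1 / (90 + 2) ~= 0.011, i.e. softened versions of the hard 1/0 labels.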
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
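# A small sketch (scores and labels are illustrative assumptions) of how the returned Platt
# parameters are consumed: the calibrated probability for a raw score f is
# 1. / (1. + exp(a * f + b)), which is what _SigmoidCalibration.predict computes below.
#
#     df = np.array([-2.0, -0.5, 0.5, 2.0])   # hypothetical decision_function scores
#     y = np.array([0, 0, 1, 1])
#     a, b = _sigmoid_calibration(df, y)
#     calibrated = 1. / (1. + np.exp(a * df + b))   # monotone map from scores to [0, 1]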
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is
        not a proper probability. If True, the smallest value in y_prob is
        mapped onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
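# A small usage sketch for calibration_curve (inputs are illustrative assumptions): the two
# returned arrays form a reliability diagram when plotted against each other; a perfectly
# calibrated model lies on the diagonal prob_pred == prob_true.
#
#     y_true = np.array([0, 0, 0, 1, 1, 1])
#     y_prob = np.array([0.1, 0.35, 0.4, 0.65, 0.8, 0.9])
#     prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
#     # with 2 bins: prob_true ~= [0., 1.], prob_pred ~= [0.28, 0.78]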
| bsd-3-clause |
elhuhdron/emdrp | neon3/data/parseEMdata.py | 1 | 111047 | # The MIT License (MIT)
#
# Copyright (c) 2016 Paul Watkins, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Generate EM data for cuda-convnet2 and neon.
# Also added "unpackager" routine for recreating output probabilities convnet outputs.
# Data is parsed out of hdf5 files for raw EM data and for labels.
# Each batch is then generated on-demand (in parallel with training previous batch).
# This process should not take more than a few seconds per batch (use verbose flag to print run times).
# The process must be shorter than the GPU batch time, so that the parsing is not the speed bottleneck.
# New feature allows for batches from multiple areas of the dataset, each area (given by size parameter and list of
# "chunk" ranges) is loaded once per batch.
#
# The hdf5 inputs can be C-order or F-order (specified in ini). hdf5 outputs always written in F-order.
import h5py
import numpy as np
from operator import add #, sub
import time
import numpy.random as nr
import os, sys
from configobj import ConfigObj, flatten_errors
from validate import Validator, ValidateError
import random
import pickle as myPickle
import io as myStringIO
# for elastic transform
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
# http://stackoverflow.com/questions/15704010/write-data-to-hdf-file-using-multiprocessing
import multiprocessing as mp
import sharedmem
def handle_hdf5_prob_output(start_queue, done_queue, probs_out, ind, label_names, outpath):
outfile = h5py.File(outpath, 'r+')
while True:
args = start_queue.get()
if args:
for n in range(len(label_names)):
d = probs_out[:,:,:,n].transpose((2,1,0)); dset = outfile[label_names[n]]
dset[ind[0]:ind[0]+d.shape[0],ind[1]:ind[1]+d.shape[1],ind[2]:ind[2]+d.shape[2]] = d
done_queue.put(1)
else:
done_queue.put(1)
break
outfile.close()
def handle_knossos_prob_output(start_queue, done_queue, probs_out, ind, label_names, outpath, strnetid):
while True:
args = start_queue.get()
if args:
curpath = os.path.join(outpath, 'x%04d' % ind[0], 'y%04d' % ind[1], 'z%04d' % ind[2])
try: os.makedirs(curpath)
            except OSError: pass
for n in range(len(label_names)):
d = probs_out[:,:,:,n].transpose((2,1,0))
d.tofile(os.path.join(curpath, label_names[n] + strnetid + '.f32'))
done_queue.put(1)
else:
done_queue.put(1)
break
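# A minimal sketch (shapes, label names and the output path are assumptions, not fixed by this
# module) of how one of the writer workers above could be driven from a parent process; for the
# hdf5 variant the output file must already contain the named datasets since it is opened 'r+'.
#
#     start_q, done_q = mp.Queue(), mp.Queue()
#     probs = sharedmem.empty((64, 64, 64, 2), dtype=np.float32)   # shared with the worker
#     p = mp.Process(target=handle_hdf5_prob_output,
#                    args=(start_q, done_q, probs, (0, 0, 0), ['ICS', 'ECS'], 'probs.h5'))
#     p.start()
#     start_q.put(True)     # write the current contents of probs at the given indices
#     done_q.get()          # block until the write has finished
#     start_q.put(False)    # ask the worker to exit
#     done_q.get(); p.join()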
class EMDataParser():
# Constants
# Numpy type to be used for cube subscripts. Throwback to parsing cube on GPU memory, but kept type for numpy.
# Very unlikely to need more than 16 bits per index (also supports loads in imagej from hdf5).
cubeSubType = np.uint16; cubeSubLim = 65536
# optional output file names / dataset names
INFO_FILE = 'batch.info'
OUTPUT_H5_CVIN = 'batch_input_data.h5'
OUTPUT_H5_CVOUT = 'batch_output_data.h5'
PRIOR_DATASET = 'prior_train'
# Where a batch is within these ranges allows for different types of batches to be selected:
# 1 to FIRST_RAND_NOLOOKUP_BATCH-1 are label lookup table randomized examples from all training cubes
# FIRST_RAND_NOLOOKUP_BATCH - FIRST_TILED_BATCH-1 are randomized examples from all training cubes
# FIRST_TILED_BATCH - (batch with max test chunk / zslice) are tiled examples from the rand then test cubes
# or all sequential cubes in chunk list or range mode
FIRST_RAND_NOLOOKUP_BATCH = 100001
FIRST_TILED_BATCH = 200001
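    # For example (numbers are illustrative, mirroring the dispatch in getBatch below): batch 57
    # is drawn via the label lookup tables, batch 100123 is a no-lookup randomized batch, and
    # batch 200001 is the first tiled batch (offset 0) of the first tiled chunk.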
# others
NAUGS = 32 # total number of simple augmentations (including reflections in z, 8 for xy augs only)
HDF5_CLVL = 5 # compression level in hdf5
def __init__(self, cfg_file, write_outputs=False, init_load_path='', save_name=None, append_features=False,
chunk_skip_list=[], dim_ordering='', image_in_size=None, isTest=False):
self.cfg_file = cfg_file
self.write_outputs = write_outputs; self.save_name = save_name; self.append_features = append_features
        print("EMDataParser: config file '%s'" % cfg_file)
# retrieve / save options from ini files, see definitions parseEMdata.ini
opts = EMDataParser.get_options(cfg_file)
for k, v in list(opts.items()):
if type(v) is list and k not in ['chunk_skip_list','aug_datasets']:
if len(v)==1:
setattr(self,k,v[0]) # save single element lists as first element
elif len(v)>0 and type(v[0]) is int: # convert the sizes and offsets to numpy arrays
setattr(self,k,np.array(v,dtype=np.int32))
else:
setattr(self,k,v) # store other list types as usual (floats, empties)
else:
setattr(self,k,v)
# Options / Inits
self.isTest = isTest # added this for allowing test/train to use same ini file in chunk_list_all mode
# added in another "sub-mode" of append features to write knossos-style raw outputs instead
# xxx - guh, this has to be set externally due to the many overlapping feature adds / backcompat done here
self.append_features_knossos = False
self.strnetid = ''; # unique integer for different trained nets for use with knossos-style output format
# Previously had these as constants, but moved label data type to ini file and special labels are defined
# depending on the data type.
self.cubeLblType = eval('np.' + self.cubeLblTypeStr)
self.EMPTY_LABEL = np.iinfo(self.cubeLblType).max
#self.ECS_LABEL = np.iinfo(self.cubeLblType).max-1 # xxx - likely not using this, but keep here for now
self.ECS_LABEL = self.EMPTY_LABEL # makes default for ECS not select any ECS no matter how it's labeled
self.EMPTY_PROB = -1.0
# the manner in which the zreslice is defined, define sort from data -> re-order and from re-order -> data.
# only 3 options because data can be automatically augmented to transpose the first two dims (in each "z-slice")
# these orders were chosen because the "unsort" is the same as the "sort" indexing, so re-order->data not needed
if dim_ordering: self.dim_ordering = dim_ordering # allow command line override
if self.dim_ordering=='xyz':
self.zreslice_dim_ordering = [0,1,2]
self.zreslice_dim_ordering_index = 0
elif self.dim_ordering=='xzy':
self.zreslice_dim_ordering = [0,2,1]
self.zreslice_dim_ordering_index = 1
elif self.dim_ordering=='zyx':
self.zreslice_dim_ordering = [2,1,0]
self.zreslice_dim_ordering_index = 2
else:
assert(False) # bad dim_ordering parameter given
# immediately re-order any arguments that need it because of reslice. this prevents from having to do this on
# command line, which ended up being annoying.
# originally reading the hdf5 was done using arguments that were re-ordered on command line, so those needed
# during read are un-re-ordered (back to normal order) in readCubeToBuffers.
# considered changing this, but calculations are more intuitive after the re-order, so left for the sizes
self.size_rand = self.size_rand[self.zreslice_dim_ordering]
self.read_size = self.read_size[self.zreslice_dim_ordering]
self.read_border = self.read_border[self.zreslice_dim_ordering]
if self.nz_tiled < 0: self.nz_tiled = self.size_rand[2]
# initialize for "chunkrange" or "chunklist" mode if these parameters are not empty
self.use_chunk_list = (len(self.chunk_range_beg) > 0); self.use_chunk_range = False
assert( self.use_chunk_list or self.chunk_list_all ) # no chunk_list_all if not chunk_list mode
if self.use_chunk_list:
assert( self.nz_tiled == 0 ) # do not define tiled cube for chunklist mode
self.chunk_range_beg = self.chunk_range_beg.reshape(-1,3); self.chunk_list_index = -1
self.nchunk_list = self.chunk_range_beg.shape[0]
if len(self.chunk_range_end) > 0:
# "chunkrange" mode, chunks are selected based on defined beginning and end of ranges in X,Y,Z
# range is open ended (python-style, end is not included in range)
self.chunk_range_end = self.chunk_range_end.reshape(-1,3);
assert( self.chunk_range_end.shape[0] == self.nchunk_list )
self.chunk_range_index = -1; self.use_chunk_range = True
self.chunk_range_rng = self.chunk_range_end - self.chunk_range_beg
assert( (self.chunk_range_rng >= 0).all() ) # some bad ranges
self.chunk_range_size = self.chunk_range_rng.prod(axis=1)
self.chunk_range_cumsize = np.concatenate((np.zeros((1,),dtype=self.chunk_range_size.dtype),
self.chunk_range_size.cumsum()))
self.chunk_range_nchunks = self.chunk_range_cumsize[-1]
self.nchunks = self.chunk_range_nchunks
else:
# regular chunklist mode, chunk_range_beg just contains the list of the chunks to use
self.nchunks = self.nchunk_list
# this code is shared by defining max number of chunks depending on chunk list or chunk range mode.
# default for the chunk_range_rand is the max number of chunks.
if self.chunk_range_rand < 0: self.chunk_range_rand = self.nchunks
assert( self.chunk_range_rand <= self.nchunks )
# offsets are either per chunk or per range, depending on above mode (whether chunk_range_end empty or not)
if len(self.offset_list) > 0:
self.offset_list = self.offset_list.reshape(-1,3)
assert( self.offset_list.shape[0] == self.nchunk_list )
else:
self.offset_list = np.zeros_like(self.chunk_range_beg)
# create a list for random chunks in chunk_range_rand based on the chunk_skip_list, if provided.
# let command line override definition in ini file.
if len(chunk_skip_list) > 0: self.chunk_skip_list = chunk_skip_list
if len(self.chunk_skip_list) > 0:
                mask = np.zeros((self.nchunks,), dtype=bool)
mask[:self.chunk_range_rand] = 1
mask[np.array(self.chunk_skip_list, dtype=np.int64)] = 0
self.chunk_rand_list = np.nonzero(mask)[0].tolist()
self.chunk_range_rand = len(self.chunk_rand_list)
# the tiled chunks default to all the chunks.
# if the chunk_skip_list is specified, then the chunk_skip_is_test parameter makes the tiled chunks
# only the chunks that are not rand chunks.
if self.chunk_skip_is_test:
self.chunk_tiled_list = np.nonzero(np.logical_not(mask))[0].tolist()
else:
self.chunk_tiled_list = list(range(self.nchunks))
else:
# in the old mode, typically the test chunks are put at the end of the chunk list,
# and all chunks are in the tiled chunk list.
                # this was annoying because a separate ini file had to be made for each cross-validation.
self.chunk_rand_list = list(range(self.chunk_range_rand))
self.chunk_tiled_list = list(range(self.nchunks))
            # xxx - no easy way to avoid calling initBatches in the beginning without breaking everything,
            #   so just load the first chunk; if the first batch is an incremental chunk rand batch, it should not reload
self.chunk_rand = self.chunk_range_beg[0,:]; self.offset_rand = self.offset_list[0,:]
# print out info for chunklist / chunkrange modes so that input data is logged
print(('EMDataParser: Chunk mode with %d ' % self.nchunk_list) + \
('ranges' if self.use_chunk_range else 'chunks') + \
(' of size %d %d %d:' % tuple(self.size_rand[self.zreslice_dim_ordering].tolist())))
fh = myStringIO.BytesIO()
if self.use_chunk_range:
np.savetxt(fh, np.concatenate((np.arange(self.nchunk_list).reshape((self.nchunk_list,1)),
self.chunk_range_beg, self.chunk_range_end, self.chunk_range_size.reshape((self.nchunk_list,1)),
self.offset_list), axis=1),
fmt='\t(%d) range %d %d %d to %d %d %d (%d chunks), offset %d %d %d',
delimiter='', newline='\n', header='', footer='', comments='')
else:
np.savetxt(fh, np.concatenate((np.arange(self.nchunk_list).reshape((self.nchunk_list,1)),
self.chunk_range_beg, self.offset_list), axis=1), fmt='\t(%d) chunk %d %d %d, offset %d %d %d',
delimiter='', newline='\n', header='', footer='', comments='')
cstr = fh.getvalue(); fh.close(); print(cstr.decode('UTF-8'))
#print '\tchunk_list_rand %d, chunk_range_rand %d' % (self.chunk_list_rand, self.chunk_range_rand)
print('\tchunk_skip_list: ' + str(self.chunk_skip_list))
print('\tchunk_list_all: ' + str(self.chunk_list_all))
# need these for appending features in chunklist mode, otherwise they do nothing
#self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand
            # need special case for the first chunk in case no next chunk is ever loaded (if only one chunk written)
self.last_chunk_rand = None; self.last_offset_rand = None
self.cur_chunk = None # started needing this for chunk_list_all mode
# some calculated values based on constants and input arguments for tiled batches
# allow command line override of image size
if image_in_size: self.image_size = image_in_size
# few checks here on inputs if not checkable by ini specifications
assert( self.nzslices == 1 or self.nzslices == 3 ) # xxx - 1 or 3 (or multiple of 4?) only supported by convnet
# to be certain things don't get off with augmentations, in and out size need to both be even or odd
assert( (self.image_size % 2) == (self.image_out_size % 2) )
assert( self.independent_labels or self.image_out_size == 1 ) # need independent labels for multiple pixels out
assert( not self.no_labels or self.no_label_lookup ) # must have no_label_lookup in no_labels mode
#assert( not self.chunk_list_all or self.no_label_lookup ) # must have no_label_lookup for all chunks loaded
assert( not self.chunk_list_all or not self.write_outputs ) # write_outputs not supported for all chunks loaded
# optionally allow tile_size to specify size for all three orthogonal directions, pick the one we're using
if self.tile_size.size > 3:
self.tile_size_all = self.tile_size.reshape((3,-1))
self.tile_size = self.tile_size_all[self.zreslice_dim_ordering_index,:]
print(('EMDataParser: tile_size %d %d %d' % tuple(self.tile_size.tolist())))
# number of cases per batch should be kept lower than number of rand streams in convnet (128*128 = 16384)
self.num_cases_per_batch = self.tile_size.prod()
self.shape_per_batch = self.tile_size.copy(); self.shape_per_batch[0:2] *= self.image_out_size
# kept this here so I'm not tempted to do it again. tile_size is NOT re-ordered, too confusing that way
#self.shape_per_batch[self.zreslice_dim_ordering[0:2]] *= self.image_out_size
if self.verbose: print("size rand %d %d %d, shape per batch %d %d %d" % \
tuple(np.concatenate((self.size_rand, self.shape_per_batch)).tolist()))
self.size_total = self.size_rand.copy(); self.size_total[2] += self.nz_tiled
assert( ((self.size_total % self.shape_per_batch) == 0).all() )
self.tiles_per_zslice = self.size_rand // self.shape_per_batch
self.pixels_per_image = self.nzslices*self.image_size**2;
self.pixels_per_out_image = self.image_out_size**2;
# need data for slices above and below labels if nzslices > 1, keep data and labels aligned
self.nrand_zslice = self.size_rand[2] + self.nzslices - 1
self.ntiled_zslice = self.nz_tiled + self.nzslices - 1
self.ntotal_zslice = self.nrand_zslice + self.ntiled_zslice
# x/y dims on tiled zslices are the same size as for selecting rand indices
self.size_tiled = np.array((self.size_rand[0], self.size_rand[1], self.nz_tiled), dtype=np.int32)
if self.image_size % 2 == 1:
self.data_slice_size = (self.size_rand[0] + self.image_size - 1, self.size_rand[1] + self.image_size - 1,
self.ntotal_zslice);
else:
self.data_slice_size = (self.size_rand[0] + self.image_size, self.size_rand[1] + self.image_size,
self.ntotal_zslice);
self.labels_slice_size = (self.size_rand[0], self.size_rand[1], self.ntotal_zslice)
# xxx - not sure why I didn't include the nzslice into the labels offset, throwback to old provider only?
self.labels_offset = (self.image_size//2, self.image_size//2, 0)
# these were previously hidden passing to GPU provider, introduce new variables
self.batches_per_zslice = self.tiles_per_zslice[0] * self.tiles_per_zslice[1]
self.num_inds_tiled = self.num_cases_per_batch * self.batches_per_zslice
self.zslices_per_batch = self.tile_size[2]
# there can either be multiple batches per zslice or multiple zslices per batch
assert( (self.batches_per_zslice == 1 and self.zslices_per_batch >= 1) or \
(self.batches_per_zslice > 1 and self.zslices_per_batch == 1) );
self.batches_per_rand_cube = self.nrand_zslice * self.batches_per_zslice // self.zslices_per_batch
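        # Worked example (an assumed configuration, not a default): with tile_size = [8, 8, 1],
        # image_out_size = 16, nzslices = 1 and size_rand = [128, 128, 128], shape_per_batch is
        # [128, 128, 1], num_cases_per_batch = 64, tiles_per_zslice = [1, 1, 128],
        # batches_per_zslice = 1, zslices_per_batch = 1 and batches_per_rand_cube = 128.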
self.getLabelMap() # setup for label types, need before any other inits referencing labels
self.segmented_labels_slice_size = tuple(map(add,self.labels_slice_size,
(2*self.segmented_labels_border).tolist()))
#assert( self.segmented_labels_border[0] == self.segmented_labels_border[1] and \
# self.segmented_labels_border[2] == 0 ); # had to add this for use in GPU labels slicing
# because of the affinity graphs, segmented labels can have a border around labels.
# plot and save this way for validation.
self.seg_out_size = self.image_out_size + 2*self.segmented_labels_border[0]
self.pixels_per_seg_out = self.seg_out_size**2
# optional border around "read-size"
# this is used so that densely labeled front-end cubes do not require label merging, instead just don't select
# training examples from around some border of each of these "read-size" cubes.
if (self.read_size < 0).any():
self.read_size = self.size_rand
assert( ((self.size_rand % self.read_size) == 0).all() )
# use the read border to prevent randomized image out patches from going outside of rand size
self.read_border[0:2] += self.image_out_size//2;
# additionally image out patches can be offset from selected pixel (label lookup), so remove this also
# if label lookup is enabled. this randomized offset is used to reduce output patch correlations.
if not self.no_label_lookup:
self.read_border[0:2] += self.image_out_offset//2
self.pixels_per_out_offset = self.image_out_offset**2
# default for label train prior probabilities is uniform for all label types
if type(self.label_priors) is list:
assert( len(self.label_priors) == self.nlabels )
self.initial_label_priors = np.array(self.label_priors,dtype=np.double)
else:
self.initial_label_priors = 1.0/self.nlabels * np.ones((self.nlabels,),dtype=np.double)
# these are used for making the output probability cubes
# xxx - the tiling procedure is confusing, see comments on this in makeTiledIndices
self.output_size = list(self.labels_slice_size)
self.output_size[0] //= self.image_out_size; self.output_size[1] //= self.image_out_size
# for neon output mode that does not actually pickle the output batches
self.batch_outputs = [None] * self.batches_per_rand_cube
self.batch_outputs_ind = 0
# variables containing actual number of convnet outputs depending on label config
self.nclass = self.noutputs if self.independent_labels else self.nIndepLabels
self.oshape = (self.image_out_size, self.image_out_size, self.nIndepLabels)
# augmented data cubes can be presented in parallel with raw EM data
self.naug_data = len(self.aug_datasets)
assert( len(self.aug_mean) >= self.naug_data )
assert( len(self.aug_std) >= self.naug_data )
# Originally these were not arrays, but changed so same code can be used without lots of conditionals to
# support chunk_list_all mode which loads all chunks into system memory at once.
n = self.nchunks if self.chunk_list_all else 1
self.aug_data = [[None]*self.naug_data for i in range(n)]
self.data_cube = [None]*n
self.segmented_labels_cube = [None]*n
self.labels_cube = [None]*n
        # need a copy of initial label priors, in case priors need to be modified because of missing labels
# when creating the rand label lookup (makeRandLabelLookup)
self.label_priors = [self.initial_label_priors.copy() for i in range(n)]
self.inds_label_lookup = [self.nlabels*[None] for i in range(n)]
self.label_lookup_lens = [self.nlabels*[0] for i in range(n)]
# print out all initialized variables in verbose mode
if self.verbose:
tmp = vars(self); #tmp['indep_label_names_out'] = 'removed from print for brevity'
print('EMDataParser, vars after init:\n'); print(tmp)
# other inits
self.rand_priors = self.nlabels * [0.0]; self.tiled_priors = self.nlabels * [0.0]
# the prior to use to reweight exported probabilities.
# training prior is calculated on-the-fly by summing output label targets.
        # this is done because, for multiple outputs and label selection schemes different from the labels,
        #   label_priors does not give a good indication of the labels that actually end up getting selected.
        # xxx - might be better to rename label_priors and other priors since they are not actually used as priors
self.prior_test = np.array(self.prior_test, dtype=np.double)
def initBatches(self, silent=False):
# turns off printouts during runs if in chunklist or chunkrange mode
self.silent = silent
if self.write_outputs:
if not os.path.exists(self.outpath): os.makedirs(self.outpath)
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'w'); outfile.close(); # truncate
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'w'); outfile.close(); # truncate
if self.chunk_list_all:
# This allows different test and train parsers that both do not have to load all chunks but can still use
# the same ini file.
cl = self.chunk_tiled_list if self.isTest else self.chunk_rand_list
for c in range(len(cl)):
self.setChunkList(c, cl)
self.readCubeToBuffers(cl[c])
self.setupAllLabels(cl[c])
if not self.no_label_lookup: self.makeRandLabelLookup(cl[c])
else:
self.readCubeToBuffers()
self.setupAllLabels()
if not self.no_label_lookup: self.makeRandLabelLookup()
if self.write_outputs: self.enumerateTiledLabels()
# for chunklist or chunkrange modes, the tiled indices do not change, so no need to regenerate them
# xxx - use hasattr for this in a number of spots, maybe change to a single boolean for readability?
if not hasattr(self, 'inds_tiled'): self.makeTiledIndices()
if self.write_outputs: self.writeH5Cubes()
self.makeBatchMeta()
self.silent = False
def makeRandLabelLookup(self, chunkind=0):
#assert( not self.chunk_list_all ) # random batches with lookup not intended for all chunks loaded mode
if not self.silent: print('EMDataParser: Creating rand label lookup for specified zslices')
        self.label_priors[chunkind][:] = self.initial_label_priors # in case the prior was modified below in chunk mode
max_total_voxels = self.size_rand.prod() # for heuristic below - xxx - rethink this, or add param?
total_voxels = 0
#self.inds_label_lookup = self.nlabels*[None]
for i in range(self.nlabels):
inds = np.transpose(np.nonzero(self.labels_cube[chunkind][:,:,0:self.nrand_zslice] == i))
if inds.shape[0] > 0:
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2,
inds[:,2] < self.nrand_zslice-self.nzslices//2),:]
inds = self.rand_inds_remove_border(inds)
# if after removing borders a label is missing, do not hard error, just force the prior to zero
# xxx - heuristic for removing labels with very few members, 1/32^3, use param?
if inds.shape[0] == 0 or float(inds.shape[0])/max_total_voxels < 3.0517578125e-05:
if not self.silent: print('EMDataParser: no voxels with label %d forcing prior to zero' % i)
# redistribute current prior amongst remaining nonzero priors
prior = self.label_priors[chunkind][i]; self.label_priors[chunkind][i] = 0
pinds = np.arange(self.nlabels)[self.label_priors[chunkind] > 0]
self.label_priors[chunkind][pinds] += prior/pinds.size
#assert( self.label_priors.sum() - 1.0 < 1e-5 )
inds += self.labels_offset
assert( np.logical_and(inds >= 0, inds < self.cubeSubLim).all() )
self.label_lookup_lens[chunkind][i] = inds.shape[0]; total_voxels += inds.shape[0]
self.inds_label_lookup[chunkind][i] = inds.astype(self.cubeSubType, order='C')
if self.write_outputs:
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('inds_label_lookup_%d' % i,data=inds,compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.close();
else:
# if a label is missing, must specify label priors on command line to handle this.
# xxx - maybe do the same as above for this, just remove and redistribute this prior?
if not self.silent: print('EMDataParser: no voxels with label %d' % i)
assert(self.label_priors[chunkind][i] == 0) # prior must be zero if label is missing
assert(total_voxels > 0);
self.rand_priors = [float(x)/total_voxels for x in self.label_lookup_lens[chunkind]]
if self.write_outputs:
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'a')
outfile.write('\nTotal voxels included for random batches %u\n' % (total_voxels,))
for i in range(self.nlabels):
outfile.write('label %d %s percentage of voxels = %.8f , count = %d, use prior %.8f\n' %\
(i,self.label_names[i],self.rand_priors[i],self.label_lookup_lens[0][i],self.label_priors[0][i]))
outfile.write('Sum percentage of allowable rand voxels = %.3f\n' % sum(self.rand_priors))
outfile.close();
# border is used to not select training examples from areas of "read-size" cubes between which the labels are
# potentially not consistent (because they were densely labeled separately).
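    # For example (illustrative numbers): with size_rand[d] = 128, read_size[d] = 64 and
    # read_border[d] = 8 along some dimension d, indices are kept only in [8, 56) and [72, 120).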
def rand_inds_remove_border(self, inds):
nread_cubes = (self.size_rand // self.read_size)
for d in range(3):
bmin = self.read_border[d]; inds = inds[inds[:,d] >= bmin,:]
for i in range(1,nread_cubes[d]):
bmin = i*self.read_size[d] - self.read_border[d]; bmax = i*self.read_size[d] + self.read_border[d];
inds = inds[np.logical_or(inds[:,d] < bmin, inds[:,d] >= bmax),:]
bmax = self.size_rand[d] - self.read_border[d]; inds = inds[inds[:,d] < bmax,:]
return inds
def enumerateTiledLabels(self):
if not self.silent: print('EMDataParser: Enumerating tiled labels (for prior probabilities)')
#total_voxels = self.size_tiled.prod() # because of potential index selects or missing labels, sum instead
tiled_count = self.nlabels * [0]; total_voxels = 0
for i in range(self.nlabels):
inds = np.transpose(np.nonzero(self.labels_cube[0][:,:,self.nrand_zslice:self.ntotal_zslice] == i))
if inds.shape[0] > 0:
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2,
inds[:,2] < self.ntiled_zslice-self.nzslices//2),:]
tiled_count[i] += inds.shape[0]; total_voxels += inds.shape[0]
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'a')
outfile.write('\nTotal voxels included for tiled %u\n' % (total_voxels,))
if total_voxels > 0:
self.tiled_priors = [float(x)/total_voxels for x in tiled_count]
for i in range(self.nlabels):
outfile.write('label %d %s percentage of voxels = %.8f , count = %d, use prior %.8f\n' \
% (i,self.label_names[i],self.tiled_priors[i],tiled_count[i],self.label_priors[0][i]))
outfile.write('Sum percentage of allowable tiled voxels = %.3f\n\n' % sum(self.tiled_priors))
# priors again for copy / paste convenience (if using in convnet param file)
outfile.write('Priors train: %s\n' % ','.join('%.8f' % i for i in self.label_priors[0]))
if total_voxels > 0:
outfile.write('Priors test: %s\n' % ','.join('%.8f' % i for i in self.tiled_priors))
if not self.no_label_lookup:
outfile.write('Priors rand: %s\n\n' % ','.join('%.8f' % i for i in self.rand_priors))
# other useful info (for debugging / validating outputs)
outfile.write('data_shape %dx%dx%d ' % self.data_cube[0].shape)
outfile.write('labels_shape %dx%dx%d\n' % self.labels_cube[0].shape)
outfile.write('num_rand_zslices %d, num_tiled_zslices %d, zslice size %dx%d\n' %\
(self.size_rand[2], self.nz_tiled, self.size_rand[0], self.size_rand[1]))
outfile.write('num_cases_per_batch %d, tiles_per_zslice %dx%dx%d\n' %\
(self.num_cases_per_batch, self.tiles_per_zslice[0], self.tiles_per_zslice[1],
self.tiles_per_zslice[2]))
outfile.write('image_out_size %d, tile_size %dx%dx%d, shape_per_batch %dx%dx%d\n' %\
(self.image_out_size, self.tile_size[0], self.tile_size[1], self.tile_size[2],
self.shape_per_batch[0], self.shape_per_batch[1], self.shape_per_batch[2]))
outfile.close()
def makeTiledIndices(self):
# xxx - realized that after writing it this way, just evenly dividing the total number of pixels per zslice
# would have also probably worked fine. this code is a bit confusing, but it works. basically just makes it
# so that each batch is a rectangular tile of a single zslice, instead of some number of pixels in the zslice.
# this method has also been extended for the case of image_out patches to get multiple z-slices per batch.
# the method is quite confusing, but it is working so leaving it as is for now, might consider revising this.
if not self.silent: print('EMDataParser: Creating tiled indices (typically for test and writing outputs)')
# create the indices for the tiled output - multiple tiles per zslice
        # added the z dimension into the indices so that with multiple outputs we can have multiple z-slices per batch.
# tile_size is the shape in image output patches for each batch
# shape_per_batch is the shape in voxels for each batch
# if more than one tile fits in a zslice, the first two dimensions of tiles_per_zslice gives this shape.
# if one tile is a single zslice, then the third dim of tiles_per_zslice gives number of zslices in a batch.
# swapped the dims so that z is first (so it changes the slowest), need to re-order below when tiling.
inds_tiled = np.zeros((3,self.tile_size[2],self.tiles_per_zslice[1],self.tiles_per_zslice[0],
self.tile_size[0],self.tile_size[1]), dtype=self.cubeSubType, order='C')
for x in range(self.tiles_per_zslice[0]):
xbeg = self.labels_offset[0] + x*self.shape_per_batch[0] + self.image_out_size//2
for y in range(self.tiles_per_zslice[1]):
ybeg = self.labels_offset[1] + y*self.shape_per_batch[1] + self.image_out_size//2
inds = np.require(np.mgrid[0:self.shape_per_batch[2], xbeg:(xbeg+self.shape_per_batch[0]):\
self.image_out_size, ybeg:(ybeg+self.shape_per_batch[1]):self.image_out_size], requirements='C')
assert( np.logical_and(inds >= 0, inds < self.cubeSubLim).all() )
inds_tiled[:,:,y,x,:,:] = inds # yes, dims are swapped, this is correct (meh)
# unswap the dims, xxx - again, maybe this should be re-written in a simpler fashion?
self.inds_tiled = inds_tiled.reshape((3,self.num_inds_tiled))[[1,2,0],:]
# create another copy that is used for generating the output probabilities (used to be unpackager).
# this is also confusing using this method of tiling, see comments above.
# xxx - the transpose is a throwback to how it was written previously in unpackager.
# could change subscript order here and in makeOutputCubes, did not see a strong need for this, view is fine
self.inds_tiled_out = self.inds_tiled.copy().T
self.inds_tiled_out[:,0] -= self.labels_offset[0]; self.inds_tiled_out[:,1] -= self.labels_offset[1];
self.inds_tiled_out[:,0:2] //= self.image_out_size
# xxx - these are from the old unpackager, should be self-consistent now, so removed this assert
#assert( ((self.inds_tiled_out[:,0] >= 0) & (self.inds_tiled_out[:,0] < self.labels_slice_size[0]) & \
# (self.inds_tiled_out[:,1] >= 0) & (self.inds_tiled_out[:,1] < self.labels_slice_size[1]) & \
# (self.inds_tiled_out[:,2] >= 0)).all() )
#assert( self.batches_per_zslice*self.num_cases_per_batch == self.inds_tiled_out.shape[0] )
if self.write_outputs:
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('tiled_indices',(3,self.tiles_per_zslice[0]*self.tile_size[0],
self.tiles_per_zslice[1]*self.tile_size[1]*self.tile_size[2]),data=inds_tiled,compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.close();
def writeH5Cubes(self):
print('EMDataParser: Exporting raw data / labels to hdf5 for validation at "%s"' % (self.outpath,))
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('data',data=self.data_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
# copy the attributes over
for name,value in list(self.data_attrs.items()):
outfile['data'].attrs.create(name,value)
if self.labels_cube[0].size > 0:
outfile.create_dataset('labels',data=self.labels_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.create_dataset('segmented_labels',data=self.segmented_labels_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
for name,value in list(self.labels_attrs.items()):
outfile['segmented_labels'].attrs.create(name,value)
outfile.close();
    # this is the main interface method for fetching batches from the chunk of the hdf5 file currently cached in memory.
# in normal mode this fetches batches that have been loaded to memory already (see initBatches).
# in chunklist mode, this can initiate loading a new chunk from a separate location in the hdf5 file.
# the total processing time here needs to be kept under the network batch time, as the load happens in parallel.
#def getBatch(self, batchnum, plot_outputs=False, tiledAug=0, do_preprocess=True):
def getBatch(self, batchnum, plot_outputs=False, tiledAug=0):
t = time.time()
# allocate batch
data = np.zeros((self.pixels_per_image, self.num_cases_per_batch), dtype=np.single, order='C')
aug_data = [None] * self.naug_data
for i in range(self.naug_data):
aug_data[i] = np.zeros((self.pixels_per_image, self.num_cases_per_batch), dtype=np.single, order='C')
if self.no_labels or self.zero_labels:
labels = np.zeros((0, self.num_cases_per_batch), dtype=np.single, order='C')
seglabels = np.zeros((0, self.num_cases_per_batch), dtype=self.cubeLblType, order='C')
else:
labels = np.zeros((self.noutputs, self.num_cases_per_batch), dtype=np.single, order='C')
seglabels = np.zeros((self.pixels_per_seg_out, self.num_cases_per_batch), dtype=self.cubeLblType, order='C')
# get data, augmented data and labels depending on batch type
if batchnum >= self.FIRST_TILED_BATCH:
augs = [tiledAug]
self.getTiledBatch(data,aug_data,labels,seglabels,batchnum,tiledAug)
elif batchnum >= self.FIRST_RAND_NOLOOKUP_BATCH:
augs = self.generateRandNoLookupBatch(data,aug_data,labels,seglabels)
else:
augs = self.generateRandBatch(data,aug_data,labels,seglabels)
# option to return zero labels, need this when convnet is expecting labels but not using them.
# this is useful for dumping features over a large area that does not contain labels.
if self.zero_labels:
labels = np.zeros((self.noutputs, self.num_cases_per_batch), dtype=np.single, order='C')
# replaced preprocessing with scalar mean subtraction and scalar std division.
# For means: < 0 and >= -1 for mean over batch, < -1 for mean over current loaded chunk, 0 to do nothing
# For stds: <= 0 and >= -1 for std over batch, < -1 for std over current loaded chunk, 1 to do nothing
if not plot_outputs:
data -= self.EM_mean if self.EM_mean >= 0 else data.mean()
data /= self.EM_std if self.EM_std > 0 else data.std()
for i in range(self.naug_data):
aug_data[i] -= self.aug_mean[i] if self.aug_mean[i] >= 0 else aug_data[i].mean()
aug_data[i] /= self.aug_std[i] if self.aug_std[i] > 0 else aug_data[i].std()
if self.verbose and not self.silent:
print('EMDataParser: Got batch ', batchnum, ' (%.3f s)' % (time.time()-t,))
# xxx - add another parameter here for different plots?
if plot_outputs: self.plotDataLbls(data,labels,seglabels,augs,pRand=(batchnum < self.FIRST_TILED_BATCH))
#if plot_outputs: self.plotData(data,dataProc,batchnum < self.FIRST_TILED_BATCH)
#time.sleep(5) # useful for "brute force" memory leak debug
#return data, labels
return ([data] if self.no_labels else [data, labels]) + aug_data
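    # A rough usage sketch (the ini filename and batch number are assumptions, not defaults):
    #
    #     parser = EMDataParser('EMdata.ini', write_outputs=False)
    #     parser.initBatches()
    #     data, labels = parser.getBatch(1)[:2]                        # randomized training batch
    #     out = parser.getBatch(parser.FIRST_TILED_BATCH, tiledAug=0)  # first tiled test batch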
def generateRandBatch(self,data,aug_data,labels,seglabels):
assert( not self.no_labels )
if self.use_chunk_list: self.randChunkList() # load a new cube in chunklist or chunkrange modes
# pick labels that will be used to select images to present
#lbls = nr.choice(self.nlabels, (self.num_cases_per_batch,), p=self.label_priors)
augs = np.bitwise_and(nr.choice(self.NAUGS, self.num_cases_per_batch), self.augs_mask)
if self.no_label_lookup:
assert(False) # never implemented balanced randomized batch creation without label lookup
#inds, chunks = self.generateRandNoLookupInds(factor=10)
## generate an inds label lookup on the fly
#inds_label_lookup = [None]*self.nlabels
#for i in range(self.nlabels):
# inds_label_lookup[i] = np.transpose(np.nonzero(self.labels_cube[0][:,:,0:self.nrand_zslice] == i))
else:
            # also randomize which chunks are presented if all chunks are loaded
if self.chunk_list_all:
cl = self.chunk_rand_list; ncl = self.chunk_range_rand
chunks = nr.choice(ncl, (self.num_cases_per_batch,))
                # need special label creation here in case priors needed changing for a chunk because
# one of the label types was missing (for example ECS in a low ECS dataset).
lbls = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
for c, chunk in zip(list(range(ncl)), cl):
sel = (chunks == c); n = sel.sum(dtype=np.int64)
lbls[chunks==c] = nr.choice(self.nlabels, (n,), p=self.label_priors[chunk])
else:
cl = [0]; ncl = 1
chunks = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
lbls = nr.choice(self.nlabels, (self.num_cases_per_batch,), p=self.label_priors[0])
# generate any possible random choices for each label type and chunk, will not use all, for efficiency
inds_lbls = np.zeros((self.nlabels, ncl, self.num_cases_per_batch), dtype=np.uint64)
for c, chunk in zip(list(range(ncl)), cl):
for i in range(self.nlabels):
if self.label_lookup_lens[chunk][i]==0: continue # do not attempt to select labels if there are none
inds_lbls[i,c,:] = nr.choice(self.label_lookup_lens[chunk][i], self.num_cases_per_batch)
# generate a random offset from the selected location.
# this prevents the center pixel from being the pixel that is used to select the image every time and
# reduces correlations in label selection priors between the center and surrounding
# image out patch labels. total possible range of offset comes from image_out_offset parameter.
            # an offset (rand range) larger than image_out_size can help reduce correlations further.
# an offset parameter of 1 causes offset to always be zero and so selection is only based on center pixel.
offset = np.zeros((self.num_cases_per_batch,3), dtype=self.cubeSubType)
offset[:,0:2] = np.concatenate([x.reshape(self.num_cases_per_batch,1) for x in np.unravel_index(\
nr.choice(self.pixels_per_out_offset, (self.num_cases_per_batch,)),
(self.image_out_offset, self.image_out_offset))], axis=1) - self.image_out_offset//2
for imgi in range(self.num_cases_per_batch):
chunk = cl[chunks[imgi]]
inds = self.inds_label_lookup[chunk][lbls[imgi]][inds_lbls[lbls[imgi],chunks[imgi],imgi],:] + offset[imgi,:]
self.getAllDataAtPoint(inds,data,aug_data,imgi,augs[imgi],chunk=chunk)
self.getLblDataAtPoint(inds,labels[:,imgi],seglabels[:,imgi],augs[imgi],chunk=chunk)
self.tallyTrainingPrior(labels)
return augs
def generateRandNoLookupBatch(self,data,aug_data,labels,seglabels):
# load a new cube in chunklist or chunkrange modes
if self.use_chunk_list and not self.chunk_list_all: self.randChunkList()
inds, chunks = self.generateRandNoLookupInds()
augs = np.bitwise_and(nr.choice(self.NAUGS, self.num_cases_per_batch), self.augs_mask)
for imgi in range(self.num_cases_per_batch):
self.getAllDataAtPoint(inds[imgi,:],data,aug_data,imgi,augs[imgi],chunk=chunks[imgi])
if not self.no_labels:
self.getLblDataAtPoint(inds[imgi,:],labels[:,imgi],seglabels[:,imgi],augs[imgi],chunk=chunks[imgi])
if not self.no_labels and not self.zero_labels: self.tallyTrainingPrior(labels)
return augs
def generateRandNoLookupInds(self, factor=2):
# generate random indices from anywhere in the rand cube
if self.no_labels:
size = self.size_rand; offset = self.labels_offset
else:
# xxx - bug was here originally up until K0057, 29 Mar 2017, was missing +1
# plus one since difference of zero actually means no choice of placement after border removed
            # (so dimension sized 1), difference of 1 means 2 possible choices, etc
size = self.size_rand - 2*self.read_border + 1
offset = self.labels_offset + self.read_border
nrand_inds = factor*self.num_cases_per_batch
#print(size, self.labels_offset, self.read_border)
inds = np.concatenate([x.reshape((nrand_inds,1)) for x in np.unravel_index(nr.choice(size.prod(),
nrand_inds), size)], axis=1) + offset
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2, inds[:,2] < self.nrand_zslice-self.nzslices//2),:]
        # also randomize which chunks are presented if all chunks are loaded
if self.chunk_list_all:
chunks = nr.choice(self.chunk_rand_list, (self.num_cases_per_batch,))
else:
chunks = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
return inds, chunks
def tallyTrainingPrior(self, labels):
if 'prior_train_count' not in self.batch_meta: return
# training label counts for calculating prior are allocated in convnet layers.py harness
# so that they can be stored in the convnet checkpoints.
self.batch_meta['prior_total_count'] += self.num_cases_per_batch
if self.independent_labels:
            self.batch_meta['prior_train_count'] += labels.astype(bool).sum(axis=1)
else:
cnts,edges = np.histogram(labels.astype(np.int32), bins=list(range(0,self.nlabels+1)), range=(0,self.nlabels))
self.batch_meta['prior_train_count'] += cnts
def getTiledBatchOffset(self, batchnum, setChunkList=False):
assert( batchnum >= self.FIRST_TILED_BATCH ) # this is only for tiled batches
# these conversions used to be in data.cu for GPU data provider
batchOffset = batchnum - self.FIRST_TILED_BATCH
# for chunklist mode, the batch also determines which chunk we are in. need to reload if moving to new chunk
if self.use_chunk_list:
chunk = batchOffset // self.batches_per_rand_cube; batchOffset %= self.batches_per_rand_cube
# xxx - moved this here so don't need this requirement for rand, doesn't matter b/c randomly selected.
# it is possible to fix this for tiled, but doesn't seem necessary.
# if it's fixed need to decide what are the chunks... still easier to have them as actual Knossos-sized
# chunks and not as defined by size_rand, then have to change how chunk_range_index is incremented
# xxx - dealt with this in a hacky way below in setChunkList for chunk_range mode
#assert( not self.use_chunk_range or (self.size_rand == self.chunksize).all() )
# draw from the list of tiled chunks only, set in init depending on parameters.
if setChunkList: self.setChunkList(chunk, self.chunk_tiled_list)
else:
chunk = None
return batchOffset, chunk
def getTiledBatch(self, data,aug_data,labels,seglabels, batchnum, aug=0):
batchOffset, chunk = self.getTiledBatchOffset(batchnum, setChunkList=True)
# get index and zslice. same method for regular or use_chunk_list modes.
ind0 = (batchOffset % self.batches_per_zslice)*self.num_cases_per_batch
zslc = batchOffset // self.batches_per_zslice * self.zslices_per_batch + self.nzslices//2;
        assert( zslc < self.ntotal_zslice ) # usually fails when specified tiled batch is out of range of cube
inds = np.zeros((3,),dtype=self.cubeSubType)
chunk = self.cur_chunk if self.chunk_list_all else 0
for imgi in range(self.num_cases_per_batch):
inds[:] = self.inds_tiled[:,ind0 + imgi]; inds[2] += zslc
self.getAllDataAtPoint(inds,data,aug_data,imgi,aug,chunk=chunk)
self.getLblDataAtPoint(inds,labels[:,imgi],seglabels[:,imgi],aug,chunk=chunk)
# set to a specific chunk, re-initBatches if the new chunk is different from the current one
def setChunkList(self, chunk, chunk_list):
# should only be called from chunklist or chunkrange modes
assert(chunk >= 0 and chunk < len(chunk_list)) # usually fails when tiled batch is out of range of chunks
chunk = chunk_list[chunk] # chunk looks up in appropriate list (for rand or tiled chunks), set by ini
if self.use_chunk_range:
self.chunk_list_index = np.nonzero(chunk >= self.chunk_range_cumsize)[0][-1]
self.chunk_range_index = chunk - self.chunk_range_cumsize[self.chunk_list_index]
if (self.size_rand == self.chunksize).all():
# original mode
chunk_rand = np.unravel_index(self.chunk_range_index, self.chunk_range_rng[self.chunk_list_index,:]) \
+ self.chunk_range_beg[self.chunk_list_index,:]
else:
assert(False) # xxx - this hack is dangerous for debugging normal use, so comment this if needed
# this is something of a hack to allow for batches larger than chunksize
scale = self.size_rand // self.chunksize
chunk_rand = np.unravel_index(self.chunk_range_index, self.chunk_range_rng[self.chunk_list_index,:]) \
* scale + self.chunk_range_beg[self.chunk_list_index,:]
offset_rand = self.offset_list[self.chunk_list_index,:]
else:
self.chunk_list_index = chunk
chunk_rand = self.chunk_range_beg[chunk,:]; offset_rand = self.offset_list[chunk,:]
self.cur_chunk = chunk # started needing this for chunk_list_all mode
# compare with actual chunks and offsets here instead of index to avoid loading the first chunk twice
if (chunk_rand != self.chunk_rand).any() or (offset_rand != self.offset_rand).any():
if self.last_chunk_rand is None:
                # special case in case there is no next chunk ever loaded (only one chunk being written)
self.last_chunk_rand = chunk_rand; self.last_offset_rand = offset_rand;
else:
self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand;
self.chunk_rand = chunk_rand; self.offset_rand = offset_rand;
if not self.chunk_list_all: self.initBatches(silent=not self.verbose)
elif self.last_chunk_rand is None:
            # special case in case there is no next chunk ever loaded (only one chunk being written)
self.last_chunk_rand = chunk_rand; self.last_offset_rand = offset_rand;
def randChunkList(self):
assert( self.chunk_range_rand > 0 ) # do not request rand chunk with zero range
# should only be called from chunklist or chunkrange modes
# xxx - randomizing chunks performed very poorly, so removed in favor of chunk_list_all mode
#if self.chunk_list_rand:
# nextchunk = random.randrange(self.chunk_range_rand)
#else:
if self.use_chunk_range: nextchunk = (self.chunk_range_index+1) % self.chunk_range_rand
else: nextchunk = (self.chunk_list_index+1) % self.chunk_range_rand
# draw from the list of random chunks only, set in init depending on parameters.
self.setChunkList(nextchunk, self.chunk_rand_list)
def getAllDataAtPoint(self,inds,data,aug_data,imgi,aug=0,chunk=0):
self.getImgDataAtPoint(self.data_cube[chunk],inds,data[:,imgi],aug)
for i in range(self.naug_data):
self.getImgDataAtPoint(self.aug_data[chunk][i],inds,aug_data[i][:,imgi],aug)
def getImgDataAtPoint(self,data_cube,inds,data,aug):
# don't simplify this... it's integer math
selx = slice(inds[0]-self.image_size//2,inds[0]-self.image_size//2+self.image_size)
sely = slice(inds[1]-self.image_size//2,inds[1]-self.image_size//2+self.image_size)
selz = slice(inds[2]-self.nzslices//2,inds[2]-self.nzslices//2+self.nzslices)
#print data_cube[selx,sely,selz].shape, inds
data[:] = EMDataParser.augmentData(data_cube[selx,sely,selz].astype(np.single),
aug).transpose(2,0,1).flatten('C') # z last because channel data must be contiguous for convnet
def getLblDataAtPoint(self,inds,labels,seglabels,aug=0,chunk=0):
if labels.size == 0: return
# some conditions can not happen here, and this should have been asserted in getLabelMap
if not self.independent_labels:
assert( self.noutputs == 1 ) # just make sure
# image out size has to be one in this case, just take the center label
# the image in and out size must both be even or odd for this to work (asserted in init)
indsl = inds - self.labels_offset; labels[:] = self.labels_cube[chunk][indsl[0],indsl[1],indsl[2]]
seglabels[:] = self.segmented_labels_cube[chunk][indsl[0],indsl[1],indsl[2]]
else:
            # NOTE from getLabelMap for border: make 3d (for convenience) in ortho reslice code, but always need same
# in xy dir and zero in z for lbl slicing. xxx - maybe too confusing, fix to just use scalar?
b = self.segmented_labels_border; indsl = inds - self.labels_offset + b
# don't simplify this... it's integer math
selx = slice(indsl[0]-self.seg_out_size//2,indsl[0]-self.seg_out_size//2+self.seg_out_size)
sely = slice(indsl[1]-self.seg_out_size//2,indsl[1]-self.seg_out_size//2+self.seg_out_size)
lbls = EMDataParser.augmentData(self.segmented_labels_cube[chunk][selx,sely,indsl[2]].reshape((\
self.seg_out_size,self.seg_out_size,1)),aug,order=0).reshape((self.seg_out_size,self.seg_out_size))
seglabels[:] = lbls.flatten('C')
# xxx - currently affinity labels assume a single pixel border around the segmented labels only.
            # put other views here if we decide to expand label border for selection (currently does not seem necessary)
# also see note above, might be better to make segmented_labels_border a scalar
lblscntr = lbls[b[0]:-b[0],b[1]:-b[1]] if b[0] > 0 else lbls
# change the view on the output for easy assignment
lblsout = labels.reshape(self.oshape)
# this code needs to be consistent with (independent) label meanings defined in getLabelMap
if self.label_type == 'ICSorOUT':
if self.image_out_size==1:
# need at least two outputs for convnet (even if independent)
lblsout[lblscntr > 0,0] = 1; # ICS
lblsout[lblscntr == 0,1] = 1; # OUT
else:
lblsout[lblscntr > 0,0] = 1; # ICS
elif self.label_type == 'ICSorECSorMEM':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == self.ECS_label_value,1] = 1; # ECS
lblsout[lblscntr == 0,2] = 1; # MEM
elif self.label_type == 'ICSorECS':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == self.ECS_label_value,1] = 1; # ECS
elif self.label_type == 'ICSorMEM':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == 0,1] = 1; # MEM
elif self.label_type == 'affin2':
isICS = (lblscntr > 0)
lblsout[np.logical_and(isICS,np.diff(lbls[1:,1:-1],1,0) == 0),0] = 1;
lblsout[np.logical_and(isICS,np.diff(lbls[1:-1,1:],1,1) == 0),1] = 1;
elif self.label_type == 'affin4':
diff0 = np.diff(lbls[1:,1:-1],1,0)==0; diff1 = np.diff(lbls[1:-1,1:],1,1)==0
# affinities for ICS voxels
isICS = np.logical_and(lblscntr > 0, lblscntr != self.ECS_label_value)
lblsout[np.logical_and(isICS,diff0),0] = 1; lblsout[np.logical_and(isICS,diff1),1] = 1;
# affinities for ECS voxels
isECS = (lblscntr == self.ECS_label_value)
lblsout[np.logical_and(isECS,diff0),2] = 1; lblsout[np.logical_and(isECS,diff1),3] = 1;
elif self.label_type == 'affin6':
diff0 = np.diff(lbls[1:,1:-1],1,0)==0; diff1 = np.diff(lbls[1:-1,1:],1,1)==0
# affinities for ICS voxels
isICS = np.logical_and(lblscntr > 0, lblscntr != self.ECS_label_value)
lblsout[np.logical_and(isICS,diff0),0] = 1; lblsout[np.logical_and(isICS,diff1),1] = 1;
# affinities for ECS voxels
isECS = (lblscntr == self.ECS_label_value)
lblsout[np.logical_and(isECS,diff0),2] = 1; lblsout[np.logical_and(isECS,diff1),3] = 1;
# affinities for MEM voxels
isMEM = (lblscntr == 0)
lblsout[np.logical_and(isMEM,diff0),4] = 1; lblsout[np.logical_and(isMEM,diff1),5] = 1;
# originally this was single function for loading em data and labels.
# split into reading of labels and reading of data so that extra data can be read, i.e., augmented data
#
# Comments from original function regarding how data is loading to support reslices and C/F order:
# xxx - might think of a better way to "reslice" the dimensions later, for now, here's the method:
# read_direct requires the same size for the numpy array as in the hdf5 file. so if we're re-ordering the dims:
# (1) re-order the sizes to allocate here as if in original xyz order.
# (2) re-order the dims and sizes used in the *slices_from_indices functions into original xyz order.
# chunk indices are not changed.
# (3) at the end of this function re-order the data and labels into the specified dim ordering
# (4) the rest of the packager is then blind to the reslice dimension ordering
# NOTE ORIGINAL: chunk indices should be given in original hdf5 ordering.
# all other command line arguments should be given in the re-ordered ordering.
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
# NEW NOTE: had the re-ordering of command line inputs for reslice done automatically, meaning all inputs on
# command line should be given in original ordering, but they are re-ordered in re-slice order in init, so
# un-re-order here to go back to original ordering again (minimal overhead, done to reduce debug time).
#
    # ultimately everything is accessed as C-order, but support loading from F-order hdf5 inputs.
# h5py requires that for read_direct data must be C order and contiguous. this means F-order must be dealt with
# "manually". for F-order the cube will be in C-order, but shaped like F-order, and then the view
# transposed back to C-order so that it's transparent in the rest of the code.
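    # A hedged illustrative sketch (not the actual code path) of the nested re-ordering described above,
    # for an F-ordered hdf5 file and a 3-element zreslice_dim_ordering:
    #   buf = np.zeros(shape_xyz[::-1], order='C')      # allocate C-contiguous but shaped like F-order
    #   dset.read_direct(buf, src_slc, dst_slc)         # h5py read_direct needs a C-contiguous destination
    #   buf = buf.transpose(2, 1, 0)                    # view back as xyz (the C/F re-ordering, done innermost)
    #   buf = buf.transpose(zreslice_dim_ordering)      # then the reslice re-ordering, done outermost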
def readCubeToBuffers(self, chunkind=0):
if not self.silent: print('EMDataParser: Buffering data and labels chunk %d,%d,%d offset %d,%d,%d' % \
(self.chunk_rand[0], self.chunk_rand[1], self.chunk_rand[2],
self.offset_rand[0], self.offset_rand[1], self.offset_rand[2]))
c = chunkind
assert( c==0 or self.chunk_list_all ) # sanity check
self.data_cube[c], self.data_attrs, self.chunksize, self.datasize = \
self.loadData( self.data_cube[c], self.imagesrc, self.dataset )
self.segmented_labels_cube[c], self.label_attrs = self.loadSegmentedLabels(self.segmented_labels_cube[c])
# load augmented data cubes
for i in range(self.naug_data):
self.aug_data[c][i], data_attrs, chunksize, datasize = \
self.loadData( self.aug_data[c][i], self.augsrc, self.aug_datasets[i] )
if not self.silent: print('\tbuffered aug data ' + self.aug_datasets[i])
def loadData(self, data_cube, fname, dataset):
data_size = list(self.data_slice_size[i] for i in self.zreslice_dim_ordering)
size_rand = self.size_rand[self.zreslice_dim_ordering]; size_tiled = self.size_tiled[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('data slice size ' + str(self.data_slice_size) + \
' data size ' + str(data_size) + ' size rand ' + str(size_rand) + ' size tiled ' + str(size_tiled))
hdf = h5py.File(fname,'r')
if data_cube is None:
# for chunkrange / chunklist mode, this function is recalled, don't reallocate in this case
if self.hdf5_Corder:
data_cube = np.zeros(data_size, dtype=hdf[dataset].dtype, order='C')
else:
data_cube = np.zeros(data_size[::-1], dtype=hdf[dataset].dtype, order='C')
else:
# change back to the original view (same view changes as below, opposite order)
# zreslice un-re-ordering, so data is in original view in this function
data_cube = data_cube.transpose(self.zreslice_dim_ordering)
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
data_cube = data_cube.transpose(2,1,0)
# slice out the data hdf
ind = self.get_hdf_index_from_chunk_index(hdf[dataset], self.chunk_rand, self.offset_rand)
slc,slcd = self.get_data_slices_from_indices(ind, size_rand, data_size, False)
hdf[dataset].read_direct(data_cube, slc, slcd)
if self.nz_tiled > 0:
ind = self.get_hdf_index_from_chunk_index(hdf[dataset], self.chunk_tiled, self.offset_tiled)
slc,slcd = self.get_data_slices_from_indices(ind, size_tiled, data_size, True)
hdf[dataset].read_direct(data_cube, slc, slcd)
data_attrs = {}
for name,value in list(hdf[dataset].attrs.items()): data_attrs[name] = value
# xxx - this is only used for chunkrange mode currently, likely item to rethink...
chunksize = np.array(hdf[dataset].chunks, dtype=np.int64)
datasize = np.array(hdf[dataset].shape, dtype=np.int64) # not currently used
hdf.close()
# calculate mean and std over all of the data cube
#mean = float(data_cube.mean(dtype=np.float64)); std = float(data_cube.std(dtype=np.float64))
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
data_cube = data_cube.transpose(2,1,0)
chunksize = chunksize[::-1]; datasize = datasize[::-1]
# zreslice re-ordering, so data is in re-sliced order view outside of this function
data_cube = data_cube.transpose(self.zreslice_dim_ordering)
chunksize = chunksize[self.zreslice_dim_ordering]; datasize = datasize[self.zreslice_dim_ordering]
if self.verbose and not self.silent:
print('After re-ordering ' + fname + ' ' + dataset + ' data cube shape ' + str(data_cube.shape))
return data_cube, data_attrs, chunksize, datasize
def loadSegmentedLabels(self, segmented_labels_cube):
if self.no_labels: seglabels_size = [0, 0, 0]
else: seglabels_size = list(self.segmented_labels_slice_size[i] for i in self.zreslice_dim_ordering)
size_rand = self.size_rand[self.zreslice_dim_ordering]; size_tiled = self.size_tiled[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('seglabels size ' + str(seglabels_size) + \
' size rand ' + str(size_rand) + ' size tiled ' + str(size_tiled))
if segmented_labels_cube is None:
# for chunkrange / chunklist mode, this function is recalled, don't reallocate in this case
if self.hdf5_Corder:
segmented_labels_cube = np.zeros(seglabels_size, dtype=self.cubeLblType, order='C')
else:
segmented_labels_cube = np.zeros(seglabels_size[::-1], dtype=self.cubeLblType, order='C')
else:
# change back to the original view (same view changes as below, opposite order)
# zreslice un-re-ordering, so data is in original view in this function
segmented_labels_cube = segmented_labels_cube.transpose(self.zreslice_dim_ordering)
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
segmented_labels_cube = segmented_labels_cube.transpose(2,1,0)
# slice out the labels hdf except for no_labels mode (save memory)
hdf = h5py.File(self.labelsrc,'r');
if not self.no_labels:
ind = self.get_hdf_index_from_chunk_index(hdf[self.username], self.chunk_rand, self.offset_rand)
slc,slcd = self.get_label_slices_from_indices(ind, size_rand, seglabels_size, False)
hdf[self.username].read_direct(segmented_labels_cube, slc, slcd)
if self.nz_tiled > 0:
ind = self.get_hdf_index_from_chunk_index(hdf[self.username], self.chunk_tiled, self.offset_tiled)
slc,slcd = self.get_label_slices_from_indices(ind, size_tiled, seglabels_size, True)
hdf[self.username].read_direct(segmented_labels_cube, slc, slcd)
labels_attrs = {}
for name,value in list(hdf[self.username].attrs.items()): labels_attrs[name] = value
# these two only for validation that they are same as data cube
chunksize = np.array(hdf[self.username].chunks, dtype=np.int64)
datasize = np.array(hdf[self.username].shape, dtype=np.int64)
hdf.close()
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
segmented_labels_cube = segmented_labels_cube.transpose(2,1,0)
chunksize = chunksize[::-1]; datasize = datasize[::-1]
# zreslice re-ordering, so data is in re-sliced order view outside of this function
segmented_labels_cube = segmented_labels_cube.transpose(self.zreslice_dim_ordering)
chunksize = chunksize[self.zreslice_dim_ordering]; datasize = datasize[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('After re-ordering segmented labels cube shape ' + \
str(segmented_labels_cube.shape))
assert( (self.chunksize == chunksize).all() )
assert( (self.datasize == datasize).all() )
return segmented_labels_cube, labels_attrs
def get_hdf_index_from_chunk_index(self, hdf_dataset, chunk_index, offset):
datasize = np.array(hdf_dataset.shape, dtype=np.int64)
chunksize = np.array(hdf_dataset.chunks, dtype=np.int64)
nchunks = datasize//chunksize
if self.hdf5_Corder: ci = chunk_index
else: ci = chunk_index[::-1]
# chunk index is either given as origin-centered, or zero-based relative to corner
if self.origin_chunk_inds: ci = (ci + nchunks//2 + nchunks%2 - 1) # origin-centered chunk index
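        # Hedged worked example of the origin-centered mapping above: with nchunks = 8 along a dimension,
        # ci = 0 maps to 0 + 8//2 + 0 - 1 = 3 (just left of center); with nchunks = 7 it maps to
        # 0 + 3 + 1 - 1 = 3, the exact middle of chunks 0..6.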
# always return the indices into the hdf5 in C-order
if self.hdf5_Corder: return ci*chunksize + offset
else: return (ci*chunksize)[::-1] + offset
# xxx - add asserts to check that data select is inbounds in hdf5, currently not a graceful error
def get_data_slices_from_indices(self, ind, size, dsize, isTiled):
xysel = self.zreslice_dim_ordering[0:2]; zsel = self.zreslice_dim_ordering[2]
beg = ind; end = ind + size
beg[xysel] = beg[xysel] - self.image_size//2; beg[zsel] = beg[zsel] - self.nzslices//2
end[xysel] = end[xysel] + self.image_size//2; end[zsel] = end[zsel] + self.nzslices//2
return self.get_slices_from_limits(beg,end,dsize,isTiled)
# xxx - add asserts to check that labels select is inbounds in hdf5, currently not a graceful error
def get_label_slices_from_indices(self, ind, size, dsize, isTiled):
#xysel = self.zreslice_dim_ordering[0:2];
zsel = self.zreslice_dim_ordering[2]
beg = ind - self.segmented_labels_border[self.zreslice_dim_ordering]
end = ind + size + self.segmented_labels_border[self.zreslice_dim_ordering]
beg[zsel] = beg[zsel] - self.nzslices//2; end[zsel] = end[zsel] + self.nzslices//2
return self.get_slices_from_limits(beg,end,dsize,isTiled)
def get_slices_from_limits(self, beg, end, size, isTiled):
zsel = self.zreslice_dim_ordering[2]
begd = np.zeros_like(size); endd = size;
if isTiled:
begd[zsel], endd[zsel] = self.nrand_zslice, self.ntotal_zslice
else:
begd[zsel], endd[zsel] = 0, self.nrand_zslice
if self.hdf5_Corder:
slc = np.s_[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]]
slcd = np.s_[begd[0]:endd[0],begd[1]:endd[1],begd[2]:endd[2]]
else:
slc = np.s_[beg[2]:end[2],beg[1]:end[1],beg[0]:end[0]]
slcd = np.s_[begd[2]:endd[2],begd[1]:endd[1],begd[0]:endd[0]]
return slc,slcd
# Don't need to pickle meta anymore as parser is instantiated and runs on-demand from EMDataProvider.
def makeBatchMeta(self):
if self.no_labels:
noutputs = 0; label_names = []
else:
noutputs = self.noutputs
label_names = self.indep_label_names_out if self.independent_labels else self.label_names
# do not re-assign meta dict so this works with chunklist mode (which reloads each time)
if not hasattr(self, 'batch_meta'): self.batch_meta = {}
b = self.batch_meta; b['num_cases_per_batch']=self.num_cases_per_batch; b['label_names']=label_names;
b['nlabels']=len(label_names); b['pixels_per_image']=self.pixels_per_image;
#b['scalar_data_mean']=data_mean; b['scalar_data_std']=data_std;
b['noutputs']=noutputs; b['num_pixels_per_case']=self.pixels_per_image;
if self.verbose and not self.silent: print(self.batch_meta)
# for debug only (standalone harness), DO NOT UNcomment these when running network, then count won't be saved
#self.batch_meta['prior_train_count'] = np.zeros((self.noutputs if self.independent_labels else self.nlabels,),
# dtype=np.int64)
#self.batch_meta['prior_total_count'] = np.zeros((1,),dtype=np.int64)
def setupAllLabels(self, chunkind=0):
c = chunkind # for chunk_list_all mode
assert( c==0 or self.chunk_list_all ) # sanity check
self.labels_cube[c] = self.setupLabels(self.labels_cube[c], self.segmented_labels_cube[c])
# labels_cube is used for label priors for selecting pixels for presentation to convnet.
# The actual labels are calculated on-demand using the segmented labels (not using the labels created here),
# unless NOT independent_labels in which case the labels cube is also the labels sent to the network.
def setupLabels(self, labels_cube, segmented_labels_cube):
# init labels to empty and return if no label mode
if self.no_labels:
return np.zeros((0,0,0), dtype=self.cubeLblType, order='C')
num_empty = (segmented_labels_cube == self.EMPTY_LABEL).sum()
assert( self.no_label_lookup or num_empty != segmented_labels_cube.size ) # a completely unlabeled chunk
if num_empty > 0:
            if not self.silent: print('EMDataParser: WARNING: %d empty label voxels selected' % num_empty)
# ECS as a single label is used for some of the label types. figure out which label it is based on ini param
# need a separate variable for chunklist mode where the ECS label in different regions is likely different.
# xxx - this does not work in the cases where the ECS label has been set differently in adjacent regions.
# would likely have to go back to a fixed label for this, but not clear why this situation would come up.
if self.ECS_label == -2:
self.ECS_label_value = self.ECS_LABEL # specifies ECS label is single defined value (like EMPTY_LABEL)
elif self.ECS_label == -1:
# specifies that ECS is labeled with whatever the last label is, ignore the empty label
self.ECS_label_value = (segmented_labels_cube[segmented_labels_cube != self.EMPTY_LABEL]).max()
else:
self.ECS_label_value = self.ECS_label # use supplied value for ECS
if labels_cube is None:
# do not re-allocate if just setting up labels for a new cube for cubelist / cuberange mode
labels_cube = np.zeros(self.labels_slice_size, dtype=self.cubeLblType, order='C')
if self.select_label_type == 'ICS_OUT':
labels_cube[segmented_labels_cube == 0] = self.labels['OUT']
labels_cube[segmented_labels_cube > 0] = self.labels['ICS']
elif self.select_label_type == 'ICS_ECS_MEM':
labels_cube[segmented_labels_cube == 0] = self.labels['MEM']
labels_cube[np.logical_and(segmented_labels_cube > 0,
segmented_labels_cube != self.ECS_label_value)] = self.labels['ICS']
labels_cube[segmented_labels_cube == self.ECS_label_value] = self.labels['ECS']
elif self.select_label_type == 'ICS_OUT_BRD':
labels_cube[:] = self.labels['ICS']
labels_cube[np.diff(segmented_labels_cube[1:,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[0:-1,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,1:],1,1) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,0:-1],1,1) != 0] = self.labels['BORDER']
# xxx - this would highlight membrane areas that are near border also, better method of balancing priors?
labels_cube[np.logical_and(segmented_labels_cube[1:-1,1:-1] == 0,
labels_cube != self.labels['BORDER'])] = self.labels['OUT']
#labels_cube[segmented_labels_cube[1:-1,1:-1] == 0] = self.labels['OUT']
elif self.select_label_type == 'ICS_ECS_MEM_BRD':
labels_cube[:] = self.labels['ICS']
labels_cube[np.diff(segmented_labels_cube[1:,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[0:-1,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,1:],1,1) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,0:-1],1,1) != 0] = self.labels['BORDER']
# xxx - this would highlight membrane areas that are near border also, better method of balancing priors?
labels_cube[np.logical_and(segmented_labels_cube[1:-1,1:-1] == 0,
labels_cube != self.labels['BORDER'])] = self.labels['MEM']
#labels_cube[segmented_labels_cube[1:-1,1:-1] == 0] = self.labels['MEM']
labels_cube[segmented_labels_cube[1:-1,1:-1] == self.ECS_label_value] = self.labels['ECS']
else:
raise Exception('Unknown select_label_type ' + self.select_label_type)
return labels_cube
def getLabelMap(self):
        # NEW: change how labels work so that labels that use priors for selection are separate from how the labels are
# sent / interpreted by the network. First setup select_label_type maps.
# NOTE: the select labels are also the labels sent to the network if NOT independent_labels.
# in this case there also must only be one output voxel (multiple outputs not supported for
        # mutually exclusive labels).
border = 0 # this is for label selects or label types that need bordering pixels around cube to fetch labels
# label names need to be in the same order as the indices in the labels map.
if self.select_label_type == 'ICS_OUT':
self.labels = {'OUT':0, 'ICS':1} # labels are binary, intracellular or outside (not intracellular)
self.label_names = ['OUT', 'ICS']
elif self.select_label_type == 'ICS_ECS_MEM':
self.labels = {'MEM':0, 'ICS':1, 'ECS':2} # label values for ICS, MEM and ECS fixed values
self.label_names = ['MEM', 'ICS', 'ECS']
elif self.select_label_type == 'ICS_OUT_BRD':
# for the priors for affinity just use three classes, OUT, ICS or border voxels
self.labels = {'OUT':0, 'BORDER':1, 'ICS':2}
self.label_names = ['OUT', 'BORDER', 'ICS']
border = 1
elif self.select_label_type == 'ICS_ECS_MEM_BRD':
            # for the priors for affinity use four classes: MEM, ICS, ECS or border voxels
self.labels = {'MEM':0, 'BORDER':1, 'ICS':2, 'ECS':3}
self.label_names = ['MEM', 'BORDER', 'ICS', 'ECS']
border = 1
# then setup "independent label names" which will be used to setup how labels are sent to network.
# these are the actual labels, the ones above are used for selecting voxels randomly based on label lookup
# using the label prior specified in the ini file (balancing).
if self.label_type == 'ICSorOUT':
if self.image_out_size==1:
# for single pixel output, use two independent outputs (convnet doesn't like single output)
self.indep_label_names = ['ICS', 'OUT']
else:
# one output per pixel
self.indep_label_names = ['ICS']
elif self.label_type == 'ICSorECSorMEM':
self.indep_label_names = ['ICS', 'ECS', 'MEM']
elif self.label_type == 'ICSorECS':
assert( self.independent_labels ) # this setup is intended for MEM to be encoded by no winner
# the labels used for selection are still the same, just learn outputs as 0, 0 for membrane
self.indep_label_names = ['ICS', 'ECS']
elif self.label_type == 'ICSorMEM':
assert( self.independent_labels ) # this setup is intended for ECS to be encoded by no winner
# the labels used for selection are still the same, just learn outputs as 0, 0 for ecs
self.indep_label_names = ['ICS', 'MEM']
elif self.label_type == 'affin2':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# two output per pixel, the affinities in two directions
self.indep_label_names = ['DIM0POS', 'DIM1POS']
border = 1
elif self.label_type == 'affin4':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# four outputs per pixel, the affinities in two directions for ICS and for ECS
self.indep_label_names = ['ICS_DIM0POS', 'ICS_DIM1POS', 'ECS_DIM0POS', 'ECS_DIM1POS']
border = 1
elif self.label_type == 'affin6':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# six outputs per pixel, the affinities in two directions for ICS, ECS and MEM
self.indep_label_names = ['ICS_DIM0POS', 'ICS_DIM1POS', 'ECS_DIM0POS', 'ECS_DIM1POS',
'MEM_DIM0POS', 'MEM_DIM1POS']
border = 1
else:
raise Exception('Unknown label_type ' + self.label_type)
# this is for affinity graphs so there is no boundary problem at edges of segmented labels, default no boundary
        # make 3d (for convenience) in ortho reslice code, but always need same in xy dir and zero in z for lbl slicing
self.segmented_labels_border = np.zeros((3,),dtype=np.int32); self.segmented_labels_border[0:2] = border
assert( self.independent_labels or border==0 ) # did not see the point in supporting this
self.nlabels = len(self.label_names)
self.nIndepLabels = len(self.indep_label_names)
self.indep_label_names_out = []
for i in range(self.pixels_per_out_image):
for j in range(self.nIndepLabels):
self.indep_label_names_out.append('%s_%d' % (self.indep_label_names[j], i))
self.noutputs = len(self.indep_label_names_out) if self.independent_labels else 1
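        # Hedged example of the expansion above: with label_type 'ICSorECSorMEM' and a 2x2 output patch
        # (pixels_per_out_image == 4), indep_label_names_out is ['ICS_0', 'ECS_0', 'MEM_0', 'ICS_1', ..., 'MEM_3']
        # and noutputs == 12 when independent_labels is set (otherwise noutputs == 1).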
# plotting code to validate that data / labels are being created / selected correctly
# matplotlib imshow does not swap the axes so need transpose to put first dim on x-axis (like in imagej, itk-snap)
def plotDataLbls(self,data,labels,seglabels,augs,pRand=True,doffset=0.0):
from matplotlib import pylab as pl
        import matplotlib as mpl   # the top-level matplotlib package (not pyplot), used for colors.Normalize below
imgno = -1; interp_string = 'nearest' # 'none' not supported by slightly older version of matplotlib (meh)
        # just keep bringing up plots with EM data samples from the batch range
while True:
pl.figure(1);
if ((not self.independent_labels or self.image_out_size==1) and self.label_type[0:6] != 'affin') or \
labels.size == 0:
# original mode, softmax labels for single output pixel
for i in range(4):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
pl.subplot(2,2,i+1)
# this is ugly, but data was previously validated using this plotting, so kept it, also below
slc = data[:,imgno].reshape(self.pixels_per_image,1).\
reshape(self.nzslices, self.image_size, self.image_size)[self.nzslices//2,:,:].\
reshape(self.image_size, self.image_size, 1) + doffset
# Repeat for the three color channels so plotting can occur normally (written for color images).
img = np.require(np.concatenate((slc,slc,slc), axis=2) / (255.0 if slc.max() > 1 else 1),
dtype=np.single)
if labels.size > 0:
# Put a red dot at the center pixel
img[self.image_size//2,self.image_size//2,0] = 1;
img[self.image_size//2,self.image_size//2,1] = 0;
img[self.image_size//2,self.image_size//2,2] = 0;
pl.imshow(img.transpose((1,0,2)),interpolation=interp_string);
if labels.size == 0:
pl.title('imgno %d' % imgno)
elif not self.independent_labels:
pl.title('label %s (%d), imgno %d' % (self.label_names[np.asscalar(labels[0,
imgno].astype(int))], np.asscalar(seglabels[0,imgno].astype(int)), imgno))
else:
lblstr = ' '.join(self.indep_label_names[s] for s in np.nonzero(labels[:,imgno])[0].tolist())
pl.title('label %s (%d), imgno %d' % (lblstr,np.asscalar(seglabels[0,imgno].astype(int)),imgno))
else:
# new mode, multiple output pixels, make two plots
for i in range(2):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
slc = data[:,imgno].reshape(self.pixels_per_image,1).\
reshape(self.nzslices, self.image_size, self.image_size)[self.nzslices//2,:,:].\
reshape(self.image_size, self.image_size, 1) + doffset
# Repeat for the three color channels so plotting can occur normally (written for color images).
img = np.require(np.concatenate((slc,slc,slc), axis=2) / 255.0, dtype=np.single)
imgA = np.require(np.concatenate((slc,slc,slc,
np.ones((self.image_size,self.image_size,1))*255), axis=2) / 255.0, dtype=np.single)
aug = augs[imgno] if len(augs) > 1 else augs[0]
alpha = 0.5 # overlay (independent) labels with data
pl.subplot(2,2,2*i+1)
lbls = labels[:,imgno].reshape(self.oshape)
print(lbls[:,:,0].reshape((self.image_out_size,self.image_out_size)))
if self.nIndepLabels > 1:
print(lbls[:,:,1].reshape((self.image_out_size,self.image_out_size)))
assert(self.nIndepLabels < 4) # need more colors for this
osz = self.image_out_size
rch = lbls[:,:,0].reshape(osz,osz,1)
gch = lbls[:,:,1].reshape(osz,osz,1) if self.nIndepLabels > 1 else np.zeros((osz,osz,1))
bch = lbls[:,:,2].reshape(osz,osz,1) if self.nIndepLabels > 2 else np.zeros((osz,osz,1))
if alpha < 1:
pl.imshow(imgA.transpose((1,0,2)),interpolation=interp_string)
imglbls = np.concatenate((rch,gch,bch,np.ones((osz,osz,1))*alpha), axis=2).astype(np.single);
imglbls[(lbls==0).all(2),3] = 0 # make background clear
img3 = np.zeros((self.image_size, self.image_size, 4), dtype=np.single, order='C')
b = self.image_size//2-osz//2; slc = slice(b,b+osz); img3[slc,slc,:] = imglbls;
pl.imshow(img3.transpose((1,0,2)),interpolation=interp_string)
else:
imglbls = np.concatenate((rch,gch,bch), axis=2).astype(np.single);
imgB = img; b = self.image_size//2-osz//2; slc = slice(b,b+osz); imgB[slc,slc,:] = imglbls;
pl.imshow(imgB.transpose((1,0,2)),interpolation=interp_string);
pl.title('label, imgno %d' % imgno)
#alpha = 0.6 # overlay segmented labels with data
pl.subplot(2,2,2*i+2)
seglbls = seglabels[:,imgno].reshape((self.seg_out_size,self.seg_out_size))
print(seglbls)
pl.imshow(imgA.transpose((1,0,2)),interpolation=interp_string)
                m = pl.cm.ScalarMappable(norm=mpl.colors.Normalize(), cmap=pl.cm.jet)
imgseg = m.to_rgba(seglbls % 256); imgseg[:,:,3] = alpha; imgseg[seglbls==0,3] = 0
img2 = np.zeros((self.image_size, self.image_size, 4), dtype=np.single, order='C')
b = self.image_size//2-self.seg_out_size//2; slc = slice(b,b+self.seg_out_size)
img2[slc,slc,:] = imgseg;
pl.imshow(img2.transpose((1,0,2)),interpolation=interp_string)
pl.title('seglabel, aug %d' % aug)
pl.show()
# simpler plotting for just data, useful for debugging preprocessing for autoencoders
def plotData(self,data,dataProc,pRand=True,image_size=0):
from matplotlib import pylab as pl
#import matplotlib as plt
if image_size < 1: image_size = self.image_size
imgno = -1; interp_string = 'nearest' # 'none' not supported by slightly older version of matplotlib (meh)
numpix = self.image_size*self.image_size
        # just keep bringing up plots with EM data samples from the batch range
while True:
pl.figure(1);
# original mode, softmax labels for single output pixel
for i in range(2):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
pl.subplot(2,2,2*i+1)
slc = data[:,imgno].reshape(self.nzslices, image_size, image_size)[self.nzslices//2,:,:].\
reshape(image_size, image_size)
mx = slc.max(); mn = slc.min(); fc = np.isfinite(slc).sum()
h = pl.imshow(slc.transpose((1,0)),interpolation=interp_string); h.set_cmap('gray')
pl.title('orig imgno %d, min %.2f, max %.2f, naninf %d' % (imgno, mn, mx, numpix - fc))
pl.subplot(2,2,2*i+2)
slc = dataProc[:,imgno].reshape(self.nzslices, self.image_out_size,
self.image_out_size)[self.nzslices//2,:,:].reshape(self.image_out_size, self.image_out_size)
mx = slc.max(); mn = slc.min(); fc = np.isfinite(slc).sum()
h = pl.imshow(slc.transpose((1,0)),interpolation=interp_string); h.set_cmap('gray')
pl.title('preproc imgno %d, min %.2f, max %.2f, naninf %d' % (imgno, mn, mx, numpix - fc))
pl.show()
@staticmethod
def augmentData(d,augment,order=1):
if augment == 0: return d # no augmentation
        if np.bitwise_and(augment,4): d = d.transpose(1,0,2) # transpose x/y
if np.bitwise_and(augment,1): d = d[::-1,:,:] # reflect x
if np.bitwise_and(augment,2): d = d[:,::-1,:] # reflect y
if np.bitwise_and(augment,8): d = d[:,:,::-1] # reflect z
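        # Hedged example of the bit flags above: augment == 5 (0b0101) applies the x/y transpose (bit 4)
        # plus the x reflection (bit 1); values 0..15 cover all transpose/reflection combinations and
        # bit 16 additionally enables the elastic transform below.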
# elastic transform
if np.bitwise_and(augment,16):
assert(d.shape[2]==1)
d = EMDataParser.elastic_transform(d.reshape(d.shape[:2]), order=order)[:,:,None]
return d
    # xxx - did not find this to be useful, either way too noisy / jittery for high alpha and low sigma
    #   or way too blurry for high alpha and high sigma; low alpha does almost nothing, as expected
# modified from https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
@staticmethod
def elastic_transform(image, alpha=8, sigma=2, order=3, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
assert len(image.shape)==2
if random_state is None:
#random_state = np.random.RandomState(None)
random_state = nr
shape = image.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))
return map_coordinates(image, indices, order=order, mode='reflect').reshape(shape)
@staticmethod
def get_options(cfg_file):
config = ConfigObj(cfg_file,
configspec=os.path.join(os.path.dirname(os.path.realpath(__file__)),'parseEMdata.ini'))
# Validator handles missing / type / range checking
validator = Validator()
results = config.validate(validator, preserve_errors=True)
if results != True:
for (section_list, key, err) in flatten_errors(config, results):
if key is not None:
if not err:
print('EMDataParser: The "%s" key is missing in the following section(s):%s ' \
% (key, ', '.join(section_list)))
raise ValidateError
else:
print('EMDataParser: The "%s" key in the section(s) "%s" failed validation' \
% (key, ', '.join(section_list)))
raise err
elif section_list:
print('EMDataParser: The following section(s) was missing:%s ' % ', '.join(section_list))
raise ValidateError
return config
# xxx - moved logic out of convEMdata.py for better modularity, maybe can clean up more?
def checkOutputCubes(self, feature_path, batchnum, isLastbatch, outputs=None):
# for neon, allow outputs to be passed in without pickling
self.batch_outputs[self.batch_outputs_ind] = outputs
self.batch_outputs_ind = (self.batch_outputs_ind+1) % self.batches_per_rand_cube
if self.use_chunk_list:
# decide if it's appropriate to make output cubes (if at the end of the current chunk)
self.chunklistOutputCubes(feature_path, batchnum, isLastbatch)
elif isLastbatch:
# not chunklist mode, only write after all batches completed
self.makeOutputCubes(feature_path)
# special makeOutputCubes call for chunklist mode, only write if this is the last batch (overall or chunk)
def chunklistOutputCubes(self, feature_path, batchnum, isLastbatch):
assert( self.use_chunk_list ) # do not call me unless chunklist mode
batchOffset, chunk = self.getTiledBatchOffset(batchnum, setChunkList=False)
# write the output cubes if this is the last batch in current chunk or if this is the last overall batch
if isLastbatch or (batchOffset == (self.batches_per_rand_cube - 1)):
self.makeOutputCubes(feature_path, chunk*self.batches_per_rand_cube + self.FIRST_TILED_BATCH)
# prevents last chunk from being written twice (for isLastbatch, next chunk might not have loaded)
self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand;
if isLastbatch:
self.start_queue.put(None)
self.probs_output_proc.join()
# the EM data "unpackager", recreate probablity cubes using exported output features from convnet
def makeOutputCubes(self, feature_path='', batchnum=-1):
print('EMDataParser: Loading exported features')
cpb = self.num_cases_per_batch; size = self.image_out_size;
npix = self.pixels_per_out_image; nout = self.noutputs;
# labels in this context are the labels per output pixel
if self.independent_labels: nlabels = self.nIndepLabels; label_names = self.indep_label_names
else: nlabels = self.nlabels; label_names = self.label_names
# allow the starting batch to be passed in (for chunklist mode)
if batchnum < 0: batchnum = self.FIRST_TILED_BATCH
if self.verbose:
print('ntiles_per_zslice %d zslices_per_batch %d tiled shape %d %d cpb %d' % (self.batches_per_zslice,
self.zslices_per_batch, self.inds_tiled_out.shape[0],self.inds_tiled_out.shape[1], cpb))
# initial shape of probs out depends on single or multiple output pixels
if size > 1: probs_out_shape = self.output_size + [nout]
else: probs_out_shape = self.labels_slice_size + (nlabels,)
# allocate the outputs to be written to hdf5, any pixels from missing batches are filled with EMPTY_PROB
if hasattr(self, 'probs_out'):
# do not reallocate in chunklist mode, but reshape (shape changes below for multiple output pixels)
self.probs_out[:] = self.EMPTY_PROB
self.probs_out = self.probs_out.reshape(probs_out_shape)
else:
self.probs_out = self.EMPTY_PROB * np.ones(probs_out_shape, dtype=np.float32, order='C')
# get training prior if present in the meta
if 'prior_train_count' in self.batch_meta:
# calculate the training prior based on the actual labels that have been presented to the network
prior_train = self.batch_meta['prior_train_count'] / self.batch_meta['prior_total_count'].astype(np.double)
# if the training prior counts have been saved and the test prior is specified in the ini,
# then enable prior rebalancing on the output probabilities.
prior_export = self.prior_test.all() and 'prior_train_count' in self.batch_meta
if prior_export:
# make sure the test (export) prior is legit
assert( (self.prior_test > 0).all() and (self.prior_test < 1).all() ) # test priors must be probs
                # only when using independent labels with an independent test prior may the test prior not sum to 1
if not self.independent_labels or not self.prior_test_indep: assert( self.prior_test.sum() == 1 )
if not self.independent_labels or self.prior_test_indep or (self.prior_test.size == nlabels):
# normal case, test_priors are for labels or independent labels
assert( self.prior_test.size == nlabels ) # test priors must be for labels or independent label types
if self.independent_labels:
# repeat prior_test for all output pixels
prior_test = self.prior_test.reshape((1,-1)).repeat(npix, axis=0).reshape((nout,))
if self.prior_test_indep:
# in this case each output is independently bayesian reweighted against the not output
prior_nottest_to_nottrain = (1 - prior_test) / (1 - prior_train)
else:
prior_test = self.prior_test
else:
# allow the last class to be encoded as all zeros, so prob is 1-sum others
assert( self.prior_test.size == nlabels+1 )
noutp = npix*(nlabels+1)
prior_test = self.prior_test.reshape((1,-1)).repeat(npix, axis=0).reshape((noutp,))
prior_test_labels = self.prior_test[0:-1].reshape((1,-1)).repeat(npix, axis=0).reshape((nout,))
prior_train_labels = prior_train
prior_test_to_train_labels = (prior_test_labels / prior_train_labels).reshape((1,size,size,nlabels))
ptall = prior_train.reshape((size,size,nlabels))
prior_train = np.concatenate((ptall, 1-ptall.sum(axis=2,keepdims=True)), axis=2).reshape((noutp,))
# calculate ratio once here to avoid doing it every loop iteration below
prior_test_to_train = prior_test / prior_train
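                # In effect this is standard Bayesian prior rebalancing; sketched for the mutually
                # exclusive case: p_adj_k = p_k * (q_k / r_k) / sum_j p_j * (q_j / r_j), where p is the
                # network output, r the training prior and q the requested test prior. The independent
                # (prior_test_indep) case instead normalizes each output against its complement below.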
if self.independent_labels and not self.prior_test_indep:
if self.prior_test.size == nlabels:
prior_test_to_train = prior_test_to_train.reshape((1,size,size,nlabels))
else:
prior_test_to_train = prior_test_to_train.reshape((1,size,size,nlabels+1))
# load the pickled output batches and assign based on tiled indices created in packager (makeTiledIndices)
cnt = 0
for z in range(0,self.ntotal_zslice,self.zslices_per_batch):
for t in range(self.batches_per_zslice):
# allows for data to either be unpickled, or saved in memory for each "chunk" (neon mode)
d = None
if feature_path:
batchfn = os.path.join(feature_path,'data_batch_%d' % batchnum)
if os.path.isfile(batchfn):
infile = open(batchfn, 'rb'); d = myPickle.load(infile); infile.close(); d = d['data']
                        # batches take up way too much space for "large dumps" so remove them in append_features mode
if self.append_features: os.remove(batchfn)
else:
d = self.batch_outputs[cnt]; self.batch_outputs[cnt] = None
if d is not None:
if prior_export:
# apply Bayesian reweighting, either independently or over the labels set
if self.independent_labels and self.prior_test_indep:
# sum is with the not target for independent outputs
adjusted = d*prior_test_to_train; d = adjusted / (adjusted+(1-d)*prior_nottest_to_nottrain)
elif self.independent_labels:
if self.prior_test.size != nlabels:
# need 1 - sum for last label type (encoded as all zeros)
dshp = d.reshape((cpb,size,size,nlabels))
                            # rectify in case existing probs sum to over one
other_dshp = 1-dshp.sum(axis=3,keepdims=True); other_dshp[other_dshp < 0] = 0
d = (dshp*prior_test_to_train_labels / (np.concatenate((dshp, other_dshp),
axis=3)*prior_test_to_train).sum(axis=3, keepdims=True)).reshape((cpb,nout))
else:
dshp = d.reshape((cpb,size,size,nlabels)); adjusted = dshp*prior_test_to_train;
d = (adjusted / adjusted.sum(axis=3,keepdims=True)).reshape((cpb,nout))
else:
# sum is over the labels
adjusted = d*prior_test_to_train; d = adjusted / adjusted.sum(axis=1,keepdims=True)
begr = t*cpb; endr = begr + cpb
self.probs_out[self.inds_tiled_out[begr:endr,0],self.inds_tiled_out[begr:endr,1],
self.inds_tiled_out[begr:endr,2] + z,:] = d
batchnum += 1; cnt += 1
if size > 1:
# xxx - oh yah, this makes sense, see comments in makeTiledIndices
self.probs_out = self.probs_out.reshape(self.output_size + [size, size,
nlabels]).transpose((0,3,1,4,2,5)).reshape(self.labels_slice_size + (nlabels,))
# which prior counts will be written out
if 'prior_train_count' in self.batch_meta:
if not prior_export or self.prior_test.size == nlabels:
prior_write = prior_train.reshape((size,size,nlabels))
else:
prior_write = prior_train_labels.reshape((size,size,nlabels))
if self.write_outputs:
print('EMDataParser: Creating hdf5 output containing label probabilities')
if not os.path.exists(self.outpath): os.makedirs(self.outpath)
# write probs in F-order, use separate variable names in hdf file
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVOUT), 'w');
# output probability for each output if requested
for n in range(nlabels):
outfile.create_dataset(label_names[n], data=self.probs_out[:,:,:,n].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
# copy any attributes over
for name,value in list(self.data_attrs.items()):
outfile[label_names[n]].attrs.create(name,value)
            self.write_prior_hdf5(prior_export, prior_write, outfile)
outfile.close()
if self.append_features_knossos:
print('EMDataParser: Appending to knossos-style output containing label probabilities "%s" at %d %d %d' % \
(self.outpath, self.last_chunk_rand[0], self.last_chunk_rand[1], self.last_chunk_rand[2]))
ind = self.last_chunk_rand
elif self.append_features:
# write outputs probabilities to a big hdf5 that spans entire dataset, used for "large feature dumps".
# always writes in F-order (inputs can be either order tho)
assert( self.nz_tiled == 0 ) # use the rand cube only for "large feature dumps"
hdf = h5py.File(self.imagesrc,'r')
if not os.path.isfile(self.outpath):
print('EMDataParser: Creating global hdf5 output containing label probabilities "%s"' % self.outpath)
# create an output prob hdf5 file (likely for a larger dataset, this is how outputs are "chunked")
outfile = h5py.File(self.outpath, 'w');
for n in range(nlabels):
# get the shape and chunk size from the data hdf5. if this file is in F-order, re-order to C-order
shape = list(hdf[self.dataset].shape); chunks = list(hdf[self.dataset].chunks)
if not self.hdf5_Corder:
shape = shape[::-1]; chunks = chunks[::-1]
# now re-order the dims based on the specified re-ordering and then re-order back to F-order
shape = list(shape[i] for i in self.zreslice_dim_ordering)
chunks = list(chunks[i] for i in self.zreslice_dim_ordering)
shape = shape[::-1]; chunks = tuple(chunks[::-1])
outfile.create_dataset(label_names[n], shape=shape, dtype=np.float32, compression='gzip',
#compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True, fillvalue=-1.0, chunks=chunks)
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True, fillvalue=0.0, chunks=chunks)
# copy the attributes over
for name,value in list(self.data_attrs.items()):
outfile[label_names[n]].attrs.create(name,value)
self.write_prior_hdf5(prior_export, prior_write, outfile)
outfile.close()
print('EMDataParser: Appending to global hdf5 output containing label probabilities "%s" at %d %d %d' % \
(self.outpath, self.last_chunk_rand[0], self.last_chunk_rand[1], self.last_chunk_rand[2]))
# always write outputs in F-order
ind = self.get_hdf_index_from_chunk_index(hdf[self.dataset], self.last_chunk_rand,
self.last_offset_rand)
ind = ind[self.zreslice_dim_ordering][::-1] # re-order for specified ordering, then to F-order
hdf.close()
if self.append_features:
# parallel using multiprocessing, threading does not work
if not hasattr(self, 'done_queue'):
# initialize
self.start_queue = mp.Queue()
self.done_queue = mp.Queue()
self.shared_probs_out = sharedmem.empty_like(self.probs_out)
self.shared_ind = sharedmem.empty_like(ind)
if self.append_features_knossos:
self.probs_output_proc = mp.Process(target=handle_knossos_prob_output,
args=(self.start_queue, self.done_queue, self.shared_probs_out,
self.shared_ind, label_names, self.outpath,self.strnetid))
else:
self.probs_output_proc = mp.Process(target=handle_hdf5_prob_output,
args=(self.start_queue, self.done_queue, self.shared_probs_out,
self.shared_ind, label_names, self.outpath))
self.probs_output_proc.start()
else:
self.done_queue.get()
self.shared_probs_out[:] = self.probs_out; self.shared_ind[:] = ind
self.start_queue.put(1)
## non-parallel version
#outfile = h5py.File(self.outpath, 'r+');
#for n in range(nlabels):
# d = self.probs_out[:,:,:,n].transpose((2,1,0)); dset = outfile[label_names[n]]
# #print ind, d.shape, dset.shape
# dset[ind[0]:ind[0]+d.shape[0],ind[1]:ind[1]+d.shape[1],ind[2]:ind[2]+d.shape[2]] = d
#outfile.close()
def write_prior_hdf5(self, prior_export, d, outfile):
# for both modes, write out the priors, if prior reweighting enabled
# write a new dataset with the on-the-fly calculated training prior for each label type
if 'prior_train_count' in self.batch_meta:
#outfile = h5py.File(self.outpath, 'r+');
outfile.create_dataset(self.PRIOR_DATASET, data=d.transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
if prior_export:
print('EMDataParser: Exported with Bayesian prior reweighting')
outfile[self.PRIOR_DATASET].attrs.create('prior_test',self.prior_test)
else:
print('EMDataParser: Exported training prior but output not reweighted')
#outfile.close()
# for test
if __name__ == '__main__':
dp = EMDataParser(sys.argv[1:][0], write_outputs=False)
#dp = EMDataParser(sys.argv[1:][0], False, '', 'meh1')
#dp.no_label_lookup = True
dp.initBatches()
#dp.makeOutputCubes(sys.argv[1:][1])
nBatches = 10;
# test rand batches
#for i in range(nBatches): dp.getBatch(i+1, True)
#for i in range(dp.FIRST_RAND_NOLOOKUP_BATCH,dp.FIRST_RAND_NOLOOKUP_BATCH+nBatches): dp.getBatch(i+1, True)
# test tiled batches
batchOffset = 0;
for i in range(dp.FIRST_TILED_BATCH+batchOffset,dp.FIRST_TILED_BATCH+batchOffset+nBatches): dp.getBatch(i,True,16)
| mit |
lucidfrontier45/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 3 | 4094 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_greater
from sklearn.linear_model import logistic
from sklearn import datasets
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = datasets.load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(logistic.LogisticRegression(), X, Y1)
check_predictions(logistic.LogisticRegression(), X_sp, Y1)
check_predictions(logistic.LogisticRegression(C=100), X, Y1)
check_predictions(logistic.LogisticRegression(C=100), X_sp, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False),
X, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False),
X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, logistic.LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(logistic.LogisticRegression(C=10), X, Y2)
check_predictions(logistic.LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logisic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = logistic.LogisticRegression(C=len(iris.data)).fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = logistic.LogisticRegression()
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
#rng = np.random.RandomState(0)
#X = rng.random_sample((5, 10))
#y = np.ones(X.shape[0])
clf = logistic.LogisticRegression()
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic.LogisticRegression().fit(Xnan, Y1)
def test_liblinear_random_state():
X, y = datasets.make_classification(n_samples=20)
lr1 = logistic.LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = logistic.LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
| bsd-3-clause |
Xeralux/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 9 | 4605 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
from tensorflow.python.util.tf_export import tf_export
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
@tf_export('estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
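  # Hedged worked example of the defaults above (only when queue_capacity is None): with len(x) == 1000,
  # shuffle=True gives queue_capacity == 4000 and min_after_dequeue == 1000; shuffle=False gives 1000 and 250.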
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/dtypes/cast.py | 3 | 34125 | """ routings for casting """
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas._libs import tslib, lib
from pandas._libs.tslib import iNaT
from pandas.compat import string_types, text_type, PY3
from .common import (_ensure_object, is_bool, is_integer, is_float,
is_complex, is_datetimetz, is_categorical_dtype,
is_datetimelike,
is_extension_type, is_object_dtype,
is_datetime64tz_dtype, is_datetime64_dtype,
is_timedelta64_dtype, is_dtype_equal,
is_float_dtype, is_complex_dtype,
is_integer_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype, is_scalar,
_string_dtypes,
pandas_dtype,
_ensure_int8, _ensure_int16,
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype
from .generic import (ABCDatetimeIndex, ABCPeriodIndex,
ABCSeries)
from .missing import isnull, notnull
from .inference import is_list_like
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj):
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj):
if any(isinstance(v, ABCSeries) for v in obj.values):
return True
return False
def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if is_scalar(result):
return result
def trans(x):
return x
if isinstance(dtype, string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
def trans(x): # noqa
return x.round()
else:
dtype = 'object'
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if (result.dtype.itemsize <= dtype.itemsize and
np.prod(result.shape)):
return result
if issubclass(dtype.type, np.floating):
return result.astype(dtype)
elif is_bool_dtype(dtype) or is_integer_dtype(dtype):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
# if we have any nulls, then we are done
if (isnull(arr).any() or
not np.allclose(arr, trans(arr).astype(dtype), rtol=0)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notnull(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result, rtol=0):
return new_result
except:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
# a datetimelike
        # GH12821, iNaT is cast to float
elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']:
try:
result = result.astype(dtype)
except:
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize('utc')
result = result.tz_convert(dtype.tz)
except:
pass
return result
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
"""
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isnull(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
        # upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isnull(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isnull(other[mask]).any():
return changeit()
try:
np.place(result, mask, other)
except:
return changeit()
return result, False
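# Illustrative usage sketch (not part of the original source): setting NaN into
# an integer array cannot be done in place, so an upcast to float is expected.
# >>> maybe_upcast_putmask(np.array([1, 2, 3]), np.array([False, True, False]), np.nan)
# expected roughly: (array([ 1., nan,  3.]), True)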
def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isnull(fill_value):
fill_value = iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = lib.Timestamp(fill_value).value
except:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = iNaT
elif issubclass(dtype.type, np.timedelta64):
try:
fill_value = lib.Timedelta(fill_value).value
except:
# as for datetimes, cannot upcast to object
fill_value = iNaT
else:
fill_value = iNaT
elif is_datetimetz(dtype):
if isnull(fill_value):
fill_value = iNaT
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = iNaT
else:
dtype = np.object_
else:
dtype = np.object_
# in case we have a string that looked like a number
if is_categorical_dtype(dtype):
pass
elif is_datetimetz(dtype):
pass
elif issubclass(np.dtype(dtype).type, string_types):
dtype = np.object_
return dtype, fill_value
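# Illustrative usage sketch (not part of the original source): an int64 dtype is
# expected to be promoted to a float type when the fill value is NaN, since
# integers cannot hold missing values.
# >>> maybe_promote(np.dtype('int64'), fill_value=np.nan)
# expected roughly: (a float64 dtype/type, nan)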
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, a scalar belonging to a pandas extension type is inferred
        as object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
if val.ndim != 0:
raise ValueError(
"invalid ndarray passed to _infer_dtype_from_scalar")
dtype = val.dtype
val = val.item()
elif isinstance(val, string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
        # coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslib.Timestamp(val)
if val is tslib.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
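# Illustrative usage sketch (not part of the original source) of the inference:
# >>> infer_dtype_from_scalar(1)        # expected roughly: (numpy int64, 1)
# >>> infer_dtype_from_scalar(np.nan)   # expected roughly: (numpy float64, nan)
# >>> infer_dtype_from_scalar('foo')    # expected roughly: (numpy object_, 'foo')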
def infer_dtype_from_array(arr):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
Returns
-------
tuple (numpy-compat dtype, array)
Notes
-----
These infer to numpy dtypes exactly
with the exception that mixed / object dtypes
are not coerced by stringifying or conversion
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def maybe_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
def invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def maybe_convert_string_to_object(values):
"""
    Convert a string-like scalar or a string-like array to object dtype.
    This avoids numpy handling the array as str dtype.
"""
if isinstance(values, string_types):
values = np.array([values], dtype=object)
elif (isinstance(values, np.ndarray) and
issubclass(values.dtype.type, (np.string_, np.unicode_))):
values = values.astype(object)
return values
def maybe_convert_scalar(values):
"""
Convert a python scalar to the appropriate numpy dtype if possible
This avoids numpy directly converting according to platform preferences
"""
if is_scalar(values):
dtype, values = infer_dtype_from_scalar(values)
try:
values = dtype(values)
except TypeError:
pass
return values
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
l = len(categories)
if l < _int8_max:
return _ensure_int8(indexer)
elif l < _int16_max:
return _ensure_int16(indexer)
elif l < _int32_max:
return _ensure_int32(indexer)
return _ensure_int64(indexer)
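# Illustrative usage sketch (not part of the original source): with only three
# categories the codes fit in int8, so the indexer is expected to come back as int8.
# >>> coerce_indexer_dtype(np.array([0, 1, 2, 1]), categories=['a', 'b', 'c']).dtype
# expected roughly: dtype('int8')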
def coerce_to_dtypes(result, dtypes):
"""
given a dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isnull(r):
pass
elif dtype == _NS_DTYPE:
r = lib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return tslib.ints_to_pytimedelta(arr.view(np.int64))
# in py3, timedelta64[ns] are int64
elif ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
mask = isnull(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if not np.isfinite(arr).all():
raise ValueError('Cannot convert non-finite values (NA or inf) to '
'integer')
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
if dtype.name in ("datetime64", "timedelta64"):
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = np.dtype(dtype.name + "[ns]")
if copy:
return arr.astype(dtype)
return arr.view(dtype)
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = maybe_cast_to_datetime(
values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.core.tools.timedeltas import to_timedelta
new_values = to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
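# Illustrative usage sketch (not part of the original source): an object array
# holding plain numbers is expected to be coerced to a numeric dtype.
# >>> maybe_convert_objects(np.array([1, 2.5], dtype=object))
# expected roughly: array([ 1. ,  2.5]) with a float64 dtype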
def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
from pandas import to_datetime
return to_datetime(values, errors='coerce', box=False)
elif timedelta:
from pandas import to_timedelta
return to_timedelta(values, errors='coerce', box=False)
elif numeric:
from pandas import to_numeric
return to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
values = lib.maybe_convert_objects(values, convert_datetime=datetime)
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
            # If all NaNs, then do not alter
values = converted if not isnull(converted).all() else values
values = values.copy() if copy else values
except:
pass
return values
def maybe_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M' or kind == 'm':
return is_datetime64_dtype(arr.dtype)
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
    we might have an array (or single object) that is datetime-like,
    and no dtype is passed; don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : boolean, default False
       if True, try really hard to convert dates (such as datetime.date); otherwise
leave inferred dtype 'date' alone
"""
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
v = tslib.array_to_datetime(v, errors='raise')
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype
try:
from pandas import to_datetime
return to_datetime(v)
except:
pass
except:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._values.reshape(shape)
except:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(_ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isnull(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but
# technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = try_datetime(v)
return value
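# Illustrative usage sketch (not part of the original source): an object array
# containing a datetime plus a null is expected to be converted, while mixed
# non-datetime content is left untouched.
# >>> maybe_infer_to_datetimelike(np.array([datetime(2016, 1, 1), np.nan], dtype=object))
# expected roughly: an array with dtype datetime64[ns] (the null becoming NaT)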
def maybe_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
if dtype is not None:
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# force the dtype if needed
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
"dtype [%s]" % dtype)
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isnull(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
"dtype [%s]" % dtype)
if is_scalar(value):
if value == iNaT or isnull(value):
value = iNaT
else:
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
# input has to be UTC at this point, so just
# localize
value = (to_datetime(value, errors=errors)
.tz_localize('UTC')
.tz_convert(dtype.tz)
)
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
ints = np.asarray(value).view('i8')
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to %s' % dtype)
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
value = maybe_infer_to_datetimelike(value)
return value
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, ExtensionDtype) for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
has_ints = any(is_integer_dtype(t) for t in types)
has_floats = any(is_float_dtype(t) for t in types)
has_complex = any(is_complex_dtype(t) for t in types)
if has_ints or has_floats or has_complex:
return np.object
return np.find_common_type(types, [])
| mit |
rs2/pandas | pandas/tests/window/test_numba.py | 1 | 2942 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import Series, option_context
import pandas._testing as tm
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@td.skip_if_no("numba", "0.46.0")
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestApply:
@pytest.mark.parametrize("jit", [True, False])
def test_numba_vs_cython(self, jit, nogil, parallel, nopython, center):
def f(x, *args):
arg_sum = 0
for arg in args:
arg_sum += arg
return np.mean(x) + arg_sum
if jit:
import numba
f = numba.jit(f)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
args = (2,)
s = Series(range(10))
result = s.rolling(2, center=center).apply(
f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = s.rolling(2, center=center).apply(
f, engine="cython", args=args, raw=True
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("jit", [True, False])
def test_cache(self, jit, nogil, parallel, nopython):
# Test that the functions are cached correctly if we switch functions
def func_1(x):
return np.mean(x) + 4
def func_2(x):
return np.std(x) * 5
if jit:
import numba
func_1 = numba.jit(func_1)
func_2 = numba.jit(func_2)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = Series(range(10)).rolling(2)
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# func_1 should be in the cache now
assert (func_1, "rolling_apply") in NUMBA_FUNC_CACHE
result = roll.apply(
func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_2, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# This run should use the cached func_1
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
@td.skip_if_no("numba", "0.46.0")
def test_use_global_config():
def f(x):
return np.mean(x) + 2
s = Series(range(10))
with option_context("compute.use_numba", True):
result = s.rolling(2).apply(f, engine=None, raw=True)
expected = s.rolling(2).apply(f, engine="numba", raw=True)
tm.assert_series_equal(expected, result)
| bsd-3-clause |
billyhunt/osf.io | tasks/__init__.py | 1 | 27868 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from time import sleep
import invoke
from invoke import run, Collection
from website import settings
from admin import tasks as admin_tasks
from utils import pip_install, bin_prefix
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
ns = Collection()
ns.add_collection(Collection.from_module(admin_tasks), name='admin')
def task(*args, **kwargs):
"""Behaves the same way as invoke.task. Adds the task
to the root namespace.
"""
if len(args) == 1 and callable(args[0]):
new_task = invoke.task(args[0])
ns.add_task(new_task)
return new_task
def decorator(f):
new_task = invoke.task(f, *args, **kwargs)
ns.add_task(new_task)
return new_task
return decorator
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, wait=True):
"""Run the API server."""
env = os.environ.copy()
cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {} --nothreading'.format(sys.executable, port)
if wait:
return run(cmd, echo=True, pty=True)
from subprocess import Popen
return Popen(cmd, shell=True, env=env)
@task
def adminserver(port=8001):
"""Run the Admin server."""
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
cmd = '{} python manage.py runserver {} --nothreading'.format(env, port)
run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
{transaction}
Available variables:
{context}
"""
TRANSACTION_WARNING = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag.
"""
def make_shell_context(auto_transact=True):
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
from framework.transactions import commands
from framework.transactions import context as tcontext
app = init_app()
def commit():
commands.commit()
print('Transaction committed.')
if auto_transact:
commands.begin()
print('New transaction opened.')
def rollback():
commands.rollback()
print('Transaction rolled back.')
if auto_transact:
commands.begin()
print('New transaction opened.')
context = {
'transaction': tcontext.TokuTransaction,
'start_transaction': commands.begin,
'commit': commit,
'rollback': rollback,
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
if auto_transact:
commands.begin()
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell(transaction=True):
context = make_shell_context(auto_transact=transaction)
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context),
transaction=TRANSACTION_WARNING if transaction else ''
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
    the location given by its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_url=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_url:
os.environ['SHAREJS_DB_URL'] = db_url
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug", hostname=None, beat=False):
"""Run the Celery process."""
cmd = 'celery worker -A framework.celery_tasks -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(level="debug", schedule=None):
"""Run the Celery process."""
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.celery_tasks -l {0}'.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
run(bin_prefix(cmd), pty=True)
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
inv requirements --metrics
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_api():
"""Run the API test suite."""
test_module(module="api_tests/")
@task
def test_admin():
"""Run the Admin test suite."""
# test_module(module="admin_tests/")
module = "admin_tests/"
verbosity = 0
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings" '
# Use pty so the process buffers "correctly"
run(env + bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_varnish():
"""Run the Varnish test suite."""
proc = apiserver(wait=False)
sleep(5)
test_module(module="api/caching/tests/test_caching.py")
proc.kill()
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
test_api()
test_admin()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_osf():
"""
Run half of the tests to help travis go faster
"""
flake()
jshint()
test_osf()
@task
def test_travis_else():
"""
Run other half of the tests to help travis go faster
"""
test_addons()
test_api()
test_admin()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_varnish():
test_varnish()
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
inv wheelhouse --metrics
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics:
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
    print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
except subprocess.CalledProcessError as err:
raise err
# logger.warn("Error when running git describe")
return {}
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
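# Illustrative sketch (not part of the original source): for a hypothetical
# `git describe` output such as "v0.54.0-3-g<sha>-dirty" the parsed info is
# expected to look roughly like
# {'dirty': True, 'commit_sha': '<sha>', 'distance_to_latest_tag': 3,
#  'current_version': '0.54.0'}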
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def build_js_config_files():
from website import settings
from website.app import build_js_config_files as _build_js_config_files
print('Building JS config files...')
_build_js_config_files(settings)
print("...Done.")
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
build_js_config_files()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
@task
def clean(verbose=False):
run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage():
run('invoke --list')
| apache-2.0 |
vermouthmjl/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 12 | 34972 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
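# Illustrative usage sketch (not part of the original source): for three 1-D
# points the helper is expected to return one row per unordered pair of samples.
# >>> l1_cross_distances(np.array([[0.], [1.], [3.]]))
# expected roughly: (array([[ 1.], [ 3.], [ 2.]]),
#                    array([[0, 1], [0, 2], [1, 2]]))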
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
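Examples
--------
A minimal illustration (assuming ``gp`` has been fitted as in the
class-level example above; the calls are skipped by doctest on purpose):
>>> y_pred = gp.predict(X)  # doctest: +SKIP
>>> y_pred, MSE = gp.predict(X, eval_MSE=True)  # doctest: +SKIP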
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
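# (y_ is the BLUP in normalized units: the regression part np.dot(f, beta)
# plus the correlation-weighted residual part np.dot(r, gamma); it is
# rescaled to the original units of y just below.)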
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(np.ceil(n_eval / float(batch_size))))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(np.ceil(n_eval / float(batch_size))))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
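# Note: with the exponent 2. / n_samples this is actually
# det(R) ** (1. / n_samples), i.e. the n_samples-th root of the
# determinant, which is the normalization used in the reduced
# likelihood value computed below.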
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
projectcuracao/projectcuracao | graphprep/powersupplygraph.py | 1 | 3688 | # power graph generation
# filename: powersupplygraph.py
# Version 1.3 09/12/13
#
# contains event routines for data collection
#
#
import sys
import time
import RPi.GPIO as GPIO
import gc
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import dates
import pylab
import MySQLdb as mdb
sys.path.append('/home/pi/ProjectCuracao/main/config')
# if conflocal.py is not found, import default conf.py
# Check for user imports
try:
import conflocal as conf
except ImportError:
import conf
def powersystemsupplygraph(source,days,delay):
print("powersystemsupplygraph source:%s days:%s delay:%i" % (source,days,delay))
print("sleeping :",delay)
time.sleep(delay)
print("powesystemsupplygraph running now")
# blink GPIO LED when it's run
GPIO.setmode(GPIO.BOARD)
GPIO.setup(22, GPIO.OUT)
GPIO.output(22, False)
time.sleep(0.5)
GPIO.output(22, True)
# now we have get the data, stuff it in the graph
try:
print("trying database")
db = mdb.connect('localhost', 'root', conf.databasePassword, 'ProjectCuracao');
cursor = db.cursor()
#query = "SELECT TimeStamp, SolarOutputCurrent FROM powersubsystemdata where now() - interval 7 day < TimeStamp"
query = "SELECT TimeStamp, SolarOutputCurrent, BatteryOutputCurrent, PiInputCurrent FROM powersubsystemdata where now() - interval %i hour < TimeStamp" % (days*24)
#query = "SELECT TimeStamp, SolarOutputCurrent, BatteryOutputCurrent, PiInputCurrent, PowerEfficiency FROM powersubsystemdata where now() - interval %i hour < TimeStamp" % (days*24)
cursor.execute(query)
result = cursor.fetchall()
t = []
s = []
u = []
v = []
#x = []
for record in result:
t.append(record[0])
s.append(record[1])
u.append(record[2])
v.append(record[3])
#x.append(record[4])
print ("count of t=",len(t))
#print (t)
#dts = map(datetime.datetime.fromtimestamp, t)
#print dts
fds = dates.date2num(t) # converted
# matplotlib date format object
hfmt = dates.DateFormatter('%m/%d-%H')
fig = pyplot.figure()
fig.set_facecolor('white')
ax = fig.add_subplot(111,axisbg = 'white')
ax.vlines(fds, -200.0, 1000.0,colors='w')
ax.xaxis.set_major_locator(dates.HourLocator(interval=6))
ax.xaxis.set_major_formatter(hfmt)
ax.set_ylim(bottom = -200.0)
pyplot.xticks(rotation='vertical')
pyplot.subplots_adjust(bottom=.3)
pylab.plot(t, s, color='b',label="Solar",linestyle="-",marker=".")
pylab.plot(t, u, color='r',label="Battery",linestyle="-",marker=".")
pylab.plot(t, v, color='g',label="Pi Input",linestyle="-",marker=".")
#pylab.plot(t, x, color='m',label="Power Eff",linestyle="-",marker=".")
pylab.xlabel("Hours")
pylab.ylabel("Current ma")
pylab.legend(loc='upper left')
if (max(v) > max(s)):
myMax = max(v)
else:
myMax = max(s)
pylab.axis([min(t), max(t), min(u), myMax])
#pylab.title(("Pi System Power Last %i Days" % days),ha='right')
pylab.figtext(.5, .05, ("Pi System Power Last %i Days" % days),fontsize=18,ha='center')
pylab.grid(True)
pyplot.show()
pyplot.savefig("/home/pi/RasPiConnectServer/static/systempower.png",facecolor=fig.get_facecolor())
#pyplot.savefig("/home/pi/RasPiConnectServer/static/systempower.png",facecolor='w', edgedcolor='w',frameon=True,transparent=True)
except mdb.Error as e:
print("Error %d: %s" % (e.args[0], e.args[1]))
finally:
cursor.close()
db.close()
del cursor
del db
fig.clf()
pyplot.close()
pylab.close()
del t, s, u, v
gc.collect()
print("systempower finished now")
| gpl-3.0 |
Garrett-R/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 17 | 2021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example consists in fitting a Gaussian Process model onto the diabetes
dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic squared exponential correlation model and a
constant regression model are assumed. We also use a nugget = 1e-2 in order to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
belltailjp/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
anshumang/gunrock-swched-swmm | output/test.py | 1 | 4841 | #!/usr/bin/env python
import vincent # http://vincent.readthedocs.org/en/latest/
# https://github.com/wrobstory/vincent
import pandas # http://pandas.pydata.org
import json # built-in
import os # built-in
## Load all JSON files into an array of dicts.
## Each array element is one JSON input file (one run).
## Each JSON input file is a dict indexed by attribute.
## If we have more than one JSON object per file:
## http://stackoverflow.com/questions/20400818/python-trying-to-deserialize-multiple-json-objects-in-a-file-with-each-object-s
json_files = [f for f in os.listdir('.')
if (os.path.isfile(f) and
f.endswith(".json") and
(f.startswith("BFS") or f.startswith("DOBFS")) and
not f.startswith("_"))]
data_unfiltered = [json.load(open(jf)) for jf in json_files]
df = pandas.DataFrame(data_unfiltered)
## All data is now stored in the pandas DataFrame "df".
## Let's add a new column (attribute), conditional on existing columns.
## We'll need this to pivot later.
def setParameters(row):
return (row['algorithm'] + ', ' +
('un' if row['undirected'] else '') + 'directed, ' +
('' if row['mark_predecessors'] else 'no ') + 'mark predecessors')
df['parameters'] = df.apply(setParameters, axis=1)
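## For example, a (hypothetical) row with algorithm='BFS', undirected=True and
## mark_predecessors=False yields the string 'BFS, undirected, no mark predecessors'.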
# df.loc[df['mark_predecessors'] & df['undirected'], 'parameters'] = "BFS, undirected, mark predecessors"
## Bar graph, restricted to mark-pred+undirected
## x axis: dataset, y axis: MTEPS
## The following two subsetting operations are equivalent.
df_mteps = df[df['mark_predecessors'] & df['undirected']] # except for BFS
df_mteps = df[df['parameters'] == "BFS, undirected, mark predecessors"]
## draw bar graph
# these next three appear to be equivalent
g_mteps = vincent.Bar(df_mteps,
columns=['m_teps'],
key_on='dataset') # key_on uses a DataFrame column
# for x-axis values
g_mteps = vincent.Bar(df_mteps.set_index('dataset'),
columns=['m_teps'])
g_mteps = vincent.Bar(df_mteps.set_index('dataset')['m_teps'])
## Set plotting parameters for bar graph
g_mteps.axis_titles(x='Dataset', y='MTEPS')
# g_mteps.scales['y'].type = 'log'
g_mteps.colors(brew='Set3')
g_mteps.to_json('_g_mteps.json',
html_out=True,
html_path='g_mteps.html')
## Grouped bar graph
## DataFrame needs to be: rows: groups (dataset)
## cols: measurements (m_teps, but labeled by categories)
## Each row is a set of measurements grouped together (here, by dataset)
## Each column is an individual measurement (here, mteps)
##
## The pivot changes
## columns values
## [[dataset 1, expt. A, measurement A]]
## [[dataset 1, expt. B, measurement B]]
## to
## [[dataset 1, measurement A, measurement B]]
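## A toy illustration (values are made up, not taken from the JSON inputs):
##   dataset  parameters  m_teps            dataset  paramsA  paramsB
##   kron     paramsA     1200      -->     kron     1200     2300
##   kron     paramsB     2300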
g_grouped = vincent.GroupedBar(df.pivot(index='dataset',
columns='parameters',
values='m_teps'))
g_grouped.axis_titles(x='Dataset', y='MTEPS')
g_grouped.legend(title='Parameters')
g_grouped.colors(brew='Spectral')
# g_grouped.scales['y'].type = 'log'
g_grouped.to_json('_g_grouped.json',
html_out=True,
html_path='g_grouped.html')
## OK, let's slurp up the Gunrock paper's BFS data for non-Gunrock engines
## unfortunately gunrock-paper is a private repo, so this doesn't work
datasets = list(df.index)
# dfx = pandas.read_excel("https://github.com/owensgroup/gunrock-paper/raw/master/spread_sheets/comparison.xls", 'BFS')
# when reading, use the dataset names as the index. .T is "transpose"
dfx = pandas.read_excel("comparison.xls", 'BFS', index_col=0).T
dfx.index.name = 'dataset'
# get rid of columns (axis = 1) we don't care about
dfx.drop([
'Medusa',
'VertexAPI2',
'b40c',
'Davidson SSSP',
'gpu_BC',
'Gunrock'], inplace=True, axis=1)
# clean up the crazy Ligra data - X/Y => X
dfx['Ligra'] = dfx['Ligra'].apply(lambda x: float(x.split('/')[0]))
print(dfx)
## now let's manipulate the Gunrock data.
## Set up the index
df_gunrock = df.set_index('dataset')
## Keep only one parameter set per dataset for comparison
df_gunrock = df_gunrock[df_gunrock['parameters'] == "undirected, no mark predecessors"]
## Keep only "elapsed time" and rename it to Gunrock
df_gunrock = df_gunrock.filter(like='elapsed').rename(columns={'elapsed': 'Gunrock'})
print(df_gunrock)
# glue 'em together, replace all missing values with 0
dfxg = pandas.concat([dfx, df_gunrock], axis=1).fillna(0)
print(dfxg)
g_sum = vincent.GroupedBar(dfxg)
g_sum.axis_titles(x='Dataset', y='Elapsed Time')
g_sum.legend(title='Engine')
g_sum.colors(brew='Set2')
g_sum.to_json('_g_sum.json',
html_out=True,
html_path='g_sum.html')
| apache-2.0 |
sumspr/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
gnina/scripts | affinity_search/ga_addrequests.py | 1 | 8462 | #!/usr/bin/env python
'''Train a random forest on model performance from an sql database and then
run a genetic algorithm to propose new, better models to run.
'''
import sys, re, MySQLdb, argparse, os, json, subprocess
import pandas as pd
import makemodel
import numpy as np
from MySQLdb.cursors import DictCursor
from outputjson import makejson
from frozendict import frozendict
import sklearn
from sklearn.ensemble import *
from sklearn.preprocessing import *
from sklearn.feature_extraction import *
import deap
from deap import base, creator, gp, tools
from deap import algorithms
from deap import *
import multiprocessing
def getcursor(host,passwd,db):
'''create a connection and return a cursor;
doing this guards against dropped connections'''
conn = MySQLdb.connect (host = host,user = "opter",passwd=passwd,db=db)
conn.autocommit(True)
cursor = conn.cursor(DictCursor)
return cursor
def cleanparams(p):
'''Reset parameters that have no effect (e.g. settings of disabled layers) to their defaults so equivalent configurations compare equal'''
modeldefaults = makemodel.getdefaults()
for i in range(1,6):
if p['conv%d_width'%i] == 0:
for suffix in ['func', 'init', 'norm', 'size', 'stride', 'width']:
name = 'conv%d_%s'%(i,suffix)
p[name] = modeldefaults[name]
if p['pool%d_size'%i] == 0:
name = 'pool%d_type'%i
p[name] = modeldefaults[name]
if p['fc_pose_hidden'] == 0:
p['fc_pose_func'] = modeldefaults['fc_pose_func']
p['fc_pose_hidden2'] = modeldefaults['fc_pose_hidden2']
p['fc_pose_func2'] = modeldefaults['fc_pose_func2']
p['fc_pose_init'] = modeldefaults['fc_pose_init']
elif p['fc_pose_hidden2'] == 0:
p['fc_pose_hidden2'] = modeldefaults['fc_pose_hidden2']
p['fc_pose_func2'] = modeldefaults['fc_pose_func2']
if p['fc_affinity_hidden'] == 0:
p['fc_affinity_func'] = modeldefaults['fc_affinity_func']
p['fc_affinity_hidden2'] = modeldefaults['fc_affinity_hidden2']
p['fc_affinity_func2'] = modeldefaults['fc_affinity_func2']
p['fc_affinity_init'] = modeldefaults['fc_affinity_init']
elif p['fc_affinity_hidden2'] == 0:
p['fc_affinity_hidden2'] = modeldefaults['fc_affinity_hidden2']
p['fc_affinity_func2'] = modeldefaults['fc_affinity_func2']
return p
def randParam(param, choices):
'''randomly select a choice for param'''
if isinstance(choices, makemodel.Range): #discretize
choices = np.linspace(choices.min,choices.max, 9)
return np.asscalar(np.random.choice(choices))
def randomIndividual():
ret = dict()
options = makemodel.getoptions()
for (param,choices) in options.items():
ret[param] = randParam(param, choices)
return cleanparams(ret)
def evaluateIndividual(ind):
x = dictvec.transform(ind)
return [rf.predict(x)[0]]
def mutateIndividual(ind, indpb=0.05):
'''for each param, with prob indpb randomly sample another choice'''
options = makemodel.getoptions()
for (param,choices) in options.items():
if np.random.rand() < indpb:
ind[param] = randParam(param, choices)
return (ind,)
def crossover(ind1, ind2, indpb=0.5):
'''swap choices with probability indpb'''
options = makemodel.getoptions()
for (param,choices) in options.items():
if np.random.rand() < indpb:
tmp = ind1[param]
ind1[param] = ind2[param]
ind2[param] = tmp
return (ind1,ind2)
def runGA(pop):
'''run GA with early stopping if not improving'''
hof = tools.HallOfFame(10)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
best = 0
pop = toolbox.clone(pop)
for i in range(40):
pop, log = algorithms.eaMuPlusLambda(pop, toolbox, mu=300, lambda_=300, cxpb=0.5, mutpb=0.2, ngen=25,
stats=stats, halloffame=hof, verbose=True)
newmax = log[-1]['max']
if best == newmax:
break
best = newmax
return pop
def addrows(config,host,db,password):
'''Insert the given config into the params table as five REQUESTED jobs, one per split'''
conn = MySQLdb.connect (host = host,user = "opter",passwd=password,db=db)
cursor = conn.cursor()
items = list(config.items())
names = ','.join([str(n) for (n,v) in items])
values = ','.join(['%s' for (n,v) in items])
names += ',id'
values += ',"REQUESTED"'
#do five variations
for split in range(5):
seed = np.random.randint(0,100000)
n = names + ',split,seed'
v = values + ',%d,%d' % (split,seed)
insert = 'INSERT INTO params (%s) VALUES (%s)' % (n,v)
cursor.execute(insert,[v for (n,v) in items])
conn.commit()
parser = argparse.ArgumentParser(description='Generate more configurations with random forest and genetic algorithms')
parser.add_argument('--host',type=str,help='Database host',required=True)
parser.add_argument('-p','--password',type=str,help='Database password',required=True)
parser.add_argument('--db',type=str,help='Database name',default='database')
parser.add_argument('--pending_threshold',type=int,default=0,help='Number of pending jobs that triggers an update')
parser.add_argument('-n','--num_configs',type=int,default=1,help='Number of configs to generate - each config is added as multiple jobs (one per split)')
args = parser.parse_args()
# first see how many id=REQUESTED jobs there are
cursor = getcursor(args.host,args.password,args.db)
cursor.execute('SELECT COUNT(*) FROM params WHERE id = "REQUESTED"')
rows = cursor.fetchone()
pending = list(rows.values())[0]
#print "Pending jobs:",pending
sys.stdout.write('%d '%pending)
sys.stdout.flush()
#if more than pending_threshold, quit
if pending > args.pending_threshold:
sys.exit(0)
cursor = getcursor(args.host,args.password,args.db)
cursor.execute('SELECT * FROM params WHERE id != "REQUESTED"')
rows = cursor.fetchall()
data = pd.DataFrame(list(rows))
#make errors zero - appropriate if error is due to parameters
data.loc[data.id == 'ERROR','R'] = 0
data.loc[data.id == 'ERROR','rmse'] = 0
data.loc[data.id == 'ERROR','top'] = 0
data.loc[data.id == 'ERROR','auc'] = 0
data['Rtop'] = data.R*data.top
data = data.dropna('index').apply(pd.to_numeric, errors='ignore')
#convert data to be useful for sklearn
notparams = ['R','auc','Rtop','id','msg','rmse','seed','serial','time','top','split']
X = data.drop(notparams,axis=1)
y = data.Rtop
dictvec = DictVectorizer()
#standardize meaningless params
Xv = dictvec.fit_transform(list(map(cleanparams,X.to_dict(orient='records'))))
print("\nTraining %d\n"%Xv.shape[0])
#train model
rf = RandomForestRegressor(n_estimators=20)
rf.fit(Xv,y)
#set up GA
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", dict, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("individual", tools.initIterate, creator.Individual, randomIndividual)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mutate",mutateIndividual)
toolbox.register("mate",crossover)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluateIndividual)
pool = multiprocessing.Pool()
toolbox.register("map", pool.map)
#setup initial population
initpop = [ creator.Individual(cleanparams(x)) for x in X.to_dict('records')]
evals = pool.map(toolbox.evaluate, initpop)
top = sorted([l[0] for l in evals],reverse=True)[0]
print("Best in training set: %f"%top)
seen = set(map(frozendict,initpop))
#include some random individuals
randpop = toolbox.population(n=len(initpop))
pop = runGA(initpop+randpop)
#make sure sorted
pop = sorted(pop,key=lambda x: -x.fitness.values[0])
#remove already evaluated configs
pop = [p for p in pop if frozendict(p) not in seen]
print("Best recommended: %f"%pop[0].fitness.values[0])
uniquified = []
for config in pop:
config = cleanparams(config)
fr = frozendict(config)
if fr not in seen:
seen.add(fr)
uniquified.append(config)
print(len(uniquified),len(pop))
for config in uniquified[:args.num_configs]:
addrows(config, args.host,args.db,args.password)
| bsd-3-clause |
bnaul/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 17 | 2876 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
# %%
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
hamogu/ARCUS | arcus/reduction/osip.py | 1 | 22367 | # Copyright (C) 2021 Massachusetts Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''This module is developed as part of arcus, but it is of more general use
and will eventually be moved to a more general package.'''
from os.path import join as pjoin
import logging
import os
import string
from abc import ABC
import numpy as np
import astropy.units as u
from astropy.table import Table
from . import arfrmf, ogip
from arcus.utils import OrderColor
from arcus.reduction.arfrmf import tagversion
try:
import matplotlib.pyplot as plt
HAS_PLT = True
except ImportError:
HAS_PLT = False
logger = logging.getLogger(__name__)
__all__ = ['OSIPBase',
'FixedWidthOSIP', 'FixedFractionOSIP', 'FractionalDistanceOSIP',
]
class OSIPBase(ABC):
'''Modify ARF files to account for order-sorting effects
This is a base class that implements order sorting and integration of
the probabilities (OSIP). This includes order-sorting of a photon list
(not yet implemented) and methods to modify ARF files
to account for order-sorting. Different diffraction orders fall on
the same physical space on the CCD. The CCD energy resolution can
then be used to assign photons to specific grating
orders. However, since the CCD energy resolution is finite, this
assignment is not perfect. Some photons may fall outside of the
nominal energy range used for order sorting and thus be lost; others may
fall into the energy range assigned to a different order. This class
modifies ARF files to correct for the integrated probability that a photon
falls inside the order-sorting region.
Derived classes should override either `osip_tab` or `osip_range`. The
other of the two can always be calculated from whichever one is given.
Parameters
----------
offset_orders : list of int
Offset from main order that is relevant.
ccd_redist : Redistribution object
Function that returns the width (the Gaussian sigma) of the CCD
resolution, given an input energy.
TODO: Better docs here
'''
osip_description = 'Not implemented'
'''String to be added to FITS header in OSIP keyword for ARFs written.'''
def __init__(self, ccd_redist, offset_orders=[-1, 0, 1]):
self.offset_orders = offset_orders
self.ccd_redist = ccd_redist
@u.quantity_input(chan_mid=u.keV, equivalencies=u.spectral())
def osip_tab(self, chan_mid, order) -> u.eV:
'''Calculate the boundaries of an order-sorting region
Parameters
----------
chan_mid : `~astropy.units.quantity.Quantity`
Energy (or wavelength actually) for which the OSIP should
be calculated, e.g. the mid-point of a channel
order : int
Diffraction order.
Returns
-------
osip_tab : `~astropy.units.quantity.Quantity`
The shape is an array that contains, for each point in `energy`,
the width of the OSIP region, as measured from the nominal
energy. If, for example, the energy is 1 keV and ``osip_tab``
is `[.1, .2] * u.keV`, then the osip is [.9, 1.2] keV.
'''
osiprange = self.osip_range(chan_mid, order)
osiprange = osiprange.to(u.eV, equivalencies=u.spectral())
E = chan_mid.to(u.eV, equivalencies=u.spectral())
return (osiprange - E) * np.array([-1, 1])[:, np.newaxis]
@u.quantity_input(chan_mid=u.keV, equivalencies=u.spectral())
def osip_range(self, chan_mid, order) -> u.eV:
'''Calculate the boundaries of an order-sorting region
This method returns the boundaries of an order-sorting region in
energy space.
This is very similar to `osip_tab`, which gives the width of the
region. Essentially, this method calls `osip_tab` and adds the energy
so that the result contains the lower and upper bound.
Parameters
----------
chan_mid : `~astropy.units.quantity.Quantity`
Energy or wavelength for which the OSIP should
be calculated, e.g. the mid-point of a channel
order : int
Diffraction order.
Returns
-------
osip : `~astropy.units.quantity.Quantity`
The shape is an array that contains, for each point in `energy`,
the lower and upper bound of the OSIP region.
'''
osiptab = self.osip_tab(chan_mid, order)
osiptab = osiptab.to(u.eV, equivalencies=u.spectral())
E = chan_mid.to(u.eV, equivalencies=u.spectral())
return E + osiptab * np.array([-1, 1])[:, np.newaxis]
@u.quantity_input(chan_mid_nominal=u.keV, equivalencies=u.spectral())
def osip_factor(self, chan_mid_nominal, o_nominal, o_true):
'''Calculate the relative effective area after order sorting.
This method calculates the fraction of photons of the true diffraction
order ``o_true`` that end up in the order-sorting region assigned to the
nominal order ``o_nominal``.
Parameters
----------
chan_mid_nominal : `~astropy.units.quantity.Quantity`
Energy or wavelength of the nominal order for which the OSIP should
be calculated, e.g. the mid-point of a channel
o_nominal : int
Nominal diffraction order, i.e. the order that is to be extracted
from the CCD.
o_true : int
True diffraction order.
Returns
-------
osip_factor : np.array
An array that contains, for each point in `chan_mid_nominal`, the
fraction of photons of true order `o_true` that fall within the
order-sorting region defined for the nominal order `o_nominal`.
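Examples
--------
Illustrative only (``myosip`` stands for an instance of a concrete
subclass of `OSIPBase`; the energies and orders are placeholders):
>>> frac_main = myosip.osip_factor([.5, 1.] * u.keV, 5, 5)  # doctest: +SKIP
>>> frac_interloper = myosip.osip_factor([.5, 1.] * u.keV, 5, 4)  # doctest: +SKIP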
'''
if np.sign(o_nominal) != np.sign(o_true):
return 0.
osiprange = self.osip_range(chan_mid_nominal, o_nominal)
Etrue = chan_mid_nominal.to(u.eV, equivalencies=u.spectral()) * \
(o_true / o_nominal)
upper = self.ccd_redist.cdf(osiprange[1, :], loc=Etrue)
lower = self.ccd_redist.cdf(osiprange[0, :], loc=Etrue)
return upper - lower
def apply_osip(self, inputarf, outpath, order, outroot='',
overwrite=False):
'''Modify an ARF to account for incomplete order sorting
This function reads an input ARF file, which contains the
effective area for a grating order in Arcus. For a given order
sorting window, it then calculates what fraction of the
photons is lost. For example, if the order-sorting region is chosen
to contain a 90% energy fraction, then the new ARF values
will be 0.9 (the integrated probability over the order-sorting region)
times the input ARF.
If the ``order`` of the new ARF differs from the order of the
input ARF, then the new ARF is representative of order
confusion, e.g. it shows how many photons of order 4 are
sorted into the order=5 grating spectrum.
Parameters
----------
inputarf : string
Filename and path of input arf
outpath : string
Location where the output arfs are deposited
order : int
Nominal order. (The true order of the input arf is taken from the
input arf header data.)
outroot : string
prefix for output filename
overwrite : bool
Overwrite existing files?
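Examples
--------
Illustrative only (``myosip`` is an instance of a concrete subclass of
`OSIPBase`; the file and directory names are placeholders):
>>> myosip.apply_osip('input_order5.arf', 'arfs_osip/', 5)  # doctest: +SKIP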
'''
arf = ogip.ARF.read(inputarf)
try:
arf['SPECRESP'] = arf['SPECRESP'] / arf['OSIPFAC']
logger.info(f'{inputarf} already has OSIP applied, reverting ' +
'before applying new OSIP.')
except KeyError:
pass
m = int(arf.meta['ORDER'])
energies = 0.5 * (arf['ENERG_LO'] + arf['ENERG_HI'])
osip_fac = self.osip_factor(energies / m * order, order, m)
arf['SPECRESP'] = osip_fac * arf['SPECRESP']
arf['OSIPFAC'] = osip_fac
arf.meta['INSTRUME'] = f'ORDER_{order}'
arf.meta['OSIP'] = self.osip_description
arf.meta['TRUEORD'] = f'{m}'
arf.meta['CCDORDER'] = f'{order}'
# sometimes there is no overlap and all elements become 0
if np.all(arf['SPECRESP'] == 0):
logger.info(f'True refl order {m} does not contribute to ' +
f'CCDORDER {order}. ' +
'Writing ARF with all entries equal to zero.')
os.makedirs(outpath, exist_ok=True)
arf.write(pjoin(outpath, outroot +
arfrmf.filename_from_meta('arf', **arf.meta)),
overwrite=overwrite)
def write_readme(self, outpath, outroot=''):
'''Write README file to directory with ARFs
Parameters
----------
outpath : string
Location where the output ARFs are deposited
outroot : string
prefix for output filename
'''
# Get a table so that I can tag it and get all the meta information
tag = Table()
tagversion(tag)
# Format the meta information into a string
tagstring = ''
for k, v in tag.meta.items():
if isinstance(v, str):
tagstring += f'{k}: {v}\n'
else:
tagstring += f'{k}: {v[0]} // {v[1]}\n'
with open(pjoin(os.path.dirname(__file__),
'data', "osip_template.md")) as t:
template = string.Template(t.read())
output = template.substitute(tagversion=tagstring)
with open(pjoin(outpath, outroot + "README.md"), "w") as f:
f.write(output)
def plot_osip(self, ax, grid, order, **kwargs):
'''Plot banana plot with OSIP region marked.
Parameters
----------
ax : `matplotlib.axes._subplots.AxesSubplot`
The axes into which the banana is plotted.
grid : `~astropy.units.quantity.Quantity`
Wavelength grid in m lambda
order : int
Order number
kwargs :
Any other parameters are passed to ``plt.plot``
'''
grid = grid.to(u.Angstrom, equivalencies=u.spectral())
en = (grid / np.abs(order)).to(u.keV, equivalencies=u.spectral())
ohw = self.osip_tab(en, order)
line = ax.plot(grid, en, label=order, **kwargs)
ax.fill_between(grid, en - ohw[0, :], en + ohw[1, :],
color=line[0].get_color(), alpha=.2,
label='__no_legend__')
ax.set_xlabel(f'$m\\lambda$ [{grid.unit.to_string("latex_inline")}]')
ax.set_ylabel('CCD energy [keV]')
ax.set_xlim([grid.value.min(), grid.value.max()])
ax.legend()
ax.set_title('Order sorting regions')
def plot_mixture(self, ax, grid, order):
'''Plot relative contribution of main order and interlopers.
Parameters
----------
ax : `matplotlib.axes._subplots.AxesSubplot`
The axes into which the lines are plotted.
grid : `~astropy.units.quantity.Quantity`
Wavelength grid in m lambda
order : int
Order number
'''
grid = grid.to(u.Angstrom, equivalencies=u.spectral())
ax.axhspan(1, 2, facecolor='r', alpha=.3, label='extractions overlap')
en = (grid / np.abs(order)).to(u.keV, equivalencies=u.spectral())
cm = self.osip_factor(en, order, order)
ax.plot(grid, cm, label='main order', lw=2)
for o in self.offset_orders:
if o == 0:
continue
coffset = self.osip_factor(en, order, order + o)
ax.plot(grid, coffset, label=f'contam {o}',
# Different linestyle to avoid overlapping lines in plot
ls={-1: '-', +1: ':'}[np.sign(o)]
)
cm += coffset
ax.plot(grid, cm, 'k', label='sum', lw=3)
ax.set_xlabel(f'$m\\lambda$ [{grid.unit.to_string("latex_inline")}]')
ax.set_ylabel('Fraction of photons in OSIP')
ax.set_title(f'Order {order}')
ax.set_xlim([grid.value.min(), grid.value.max()])
ax.set_ylim(0, cm.max() * 1.05)
ax.legend()
def plot_summary(self, inputarf, orders, outpath, outroot=''):
'''Write summary plot to directory with ARFs
Parameters
----------
inputarf : string
Path to one input ARF. The energy grid for the plot
            is taken from that ARF.
        orders : list of int
            Diffraction orders to mark in the summary plot.
        outpath : string
Location where the output ARFs are deposited
outroot : string
prefix for output filename
'''
arf = ogip.ARF.read(inputarf)
bin_mid = 0.5 * (arf['ENERG_LO'] + arf['ENERG_HI'])
grid = bin_mid.to(u.Angstrom, equivalencies=u.spectral())
fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
oc = OrderColor(max_order=np.max(np.abs(orders)))
for order in orders:
self.plot_osip(axes[0], grid * abs(arf.meta['ORDER']), order,
**oc(order))
# pick the middle order for plotting purposes
o_mid = orders[len(orders) // 2]
self.plot_mixture(axes[1], grid * abs(arf.meta['ORDER']), o_mid)
fig.subplots_adjust(wspace=.3)
fig.savefig(pjoin(outpath, outroot + 'OSIP_regions.pdf'),
bbox_inches='tight')
def apply_osip_all(self, inpath, outpath, orders,
inroot='', outroot='', ARCCHAN='all',
overwrite=False):
'''Apply OSIP to many arfs at once
This routine iterates over orders and offset orders to produce
arfs that describe the contamination due to insufficient order
sorting. When a single order is extracted (e.g. order 5), but
the CCD resolution is insufficient to really separate the
orders well, then some photons from order 4 and 6 might end up
in the extracted spectrum. This function generates the
appropriate arfs.
Input arfs need to follow the arcus filename convention and
all be located in the same directory. In addition to the ARFs
with order-sorting applied, this method also places an
        overview plot (assuming matplotlib is available) and a readme
file in the output directory.
Parameters
----------
inpath : string
Directory with input ARFs.
outpath : string
Location where the output ARFs are deposited
orders : list of int
Nominal CCD orders to be processed
inroot : string
prefix for input filename
outroot : string
prefix for output filename
ARCCHAN : string
Channel for Arcus
overwrite : bool
Overwrite existing files?
'''
goodarf = None
for order in orders:
for t in self.offset_orders:
# No contamination by zeroth order or by orders on the other
# side of the zeroth order
if (order + t != 0) and (np.sign(order) == np.sign(order + t)):
inputarf = pjoin(inpath, inroot +
arfrmf.filename_from_meta(filetype='arf',
ARCCHAN=ARCCHAN,
ORDER=order + t))
try:
self.apply_osip(inputarf, outpath, order,
outroot=outroot, overwrite=overwrite)
goodarf = inputarf
except FileNotFoundError:
logger.info(f'Skipping order: {order}, offset: {t} ' +
'because input arf not found')
continue
# The second condition checks that at least one ARF was written
if HAS_PLT and (goodarf is not None):
self.plot_summary(goodarf, orders, outpath, outroot)
self.write_readme(outpath, outroot)
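# Minimal usage sketch for ``apply_osip_all`` (illustration only; the paths,
# order list, and ``ccd_redist`` callable are assumed placeholders rather
# than values defined in this module):
#
#     osip = FixedFractionOSIP(0.9, ccd_redist=ccd_redist)
#     osip.apply_osip_all(inpath='arfs_in/', outpath='arfs_osip/',
#                         orders=[-7, -6, -5, -4])
#
# For every nominal order this writes one ARF per contributing true order
# (the main order plus each configured offset order), an overview plot, and
# a README in the output directory.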
class FixedWidthOSIP(OSIPBase):
'''Modify ARF files to account for order-sorting effects
This class implements order sorting and integration of
    the probabilities (OSIP) for order-sorting regions that have a fixed width
in energy.
    This includes order-sorting of a photon list
    (not yet implemented) and methods to modify ARF files
to account for order-sorting. Different diffraction orders fall on
the same physical space on the CCD. The CCD energy resolution can
then be used to assign photons to specific grating
orders. However, since the CCD energy resolution is finite, this
    assignment is not perfect. Some photons may fall outside of the
    nominal energy range used for order sorting and thus be lost, others may
    fall into the energy range assigned to a different order. This class
modifies ARF files to correct for the integrated probability that a photon
falls inside the order-sorting region.
Parameters
----------
halfwidth : `astropy.units.quantity.Quantity`
        Half-width of the order-sorting region. The same width is used for all
        wavelengths.
offset_orders : list of int
Offset from main order that is relevant.
ccd_redist : callable
        Function that returns the Gaussian sigma width of the CCD energy
        resolution, given an input energy.
'''
def __init__(self, halfwidth, **kwargs):
self.halfwidth = halfwidth
super().__init__(**kwargs)
@property
def osip_description(self):
return str(self.halfwidth)
@u.quantity_input(chan_mid_nominal=u.keV, equivalencies=u.spectral())
def osip_tab(self, chan_mid_nominal, order):
return np.broadcast_to(self.halfwidth,
(2, len(chan_mid_nominal)), subok=True)
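# Sketch of what ``FixedWidthOSIP.osip_tab`` produces (the numbers are
# illustrative assumptions): with ``halfwidth = 40 * u.eV`` and a grid of N
# channel energies, the returned array has shape (2, N) and every entry is
# 40 eV, so the same half-width is applied below and above the nominal
# energy at all wavelengths.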
class FixedFractionOSIP(OSIPBase):
'''Modify ARF files to account for order-sorting effects
This class implements order sorting and integration of
the probabilities (OSIP) for order sorting regions that contain a fixed
fraction of the photons in energy space.
    This includes order-sorting of a photon list
    (not yet implemented) and methods to modify ARF files
to account for order-sorting. Different diffraction orders fall on
the same physical space on the CCD. The CCD energy resolution can
then be used to assign photons to specific grating
orders. However, since the CCD energy resolution is finite, this
    assignment is not perfect. Some photons may fall outside of the
    nominal energy range used for order sorting and thus be lost, others may
    fall into the energy range assigned to a different order. This class
modifies ARF files to correct for the integrated probability that a photon
falls inside the order-sorting region.
Parameters
----------
fraction : float
        Number (between 0 and 1) that determines which fraction of the CCD
        energy distribution should be covered by the order-sorting regions.
        The same width is used for all wavelengths.
offset_orders : list of int
Offset from main order that is relevant.
ccd_redist : callable
        Function that returns the Gaussian sigma width of the CCD energy
        resolution, given an input energy.
'''
def __init__(self, fraction, **kwargs):
self.fraction = fraction
super().__init__(**kwargs)
@property
def osip_description(self):
return 'OSIPFrac' + str(self.fraction)
@u.quantity_input(chan_mid_nominal=u.keV, equivalencies=u.spectral())
def osip_range(self, chan_mid_nominal, order):
return self.ccd_redist.interval(self.fraction, loc=chan_mid_nominal)
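# Sketch of the fixed-fraction region (this assumes ``ccd_redist`` exposes a
# scipy.stats-like ``interval`` method, as suggested by the call above): with
# ``fraction = 0.9``, ``ccd_redist.interval(0.9, loc=E)`` returns the central
# energy interval around E containing 90% of the CCD redistribution, so the
# order-sorting window automatically widens where the CCD resolution is poorer.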
class FractionalDistanceOSIP(OSIPBase):
'''Modify ARF files to account for order-sorting effects
This class implements order sorting and integration of
the probabilities (OSIP) for order sorting regions that fill a fixed
fraction of the energy space.
    This includes order-sorting of a photon list
    (not yet implemented) and methods to modify ARF files
to account for order-sorting. Different diffraction orders fall on
the same physical space on the CCD. The CCD energy resolution can
then be used to assign photons to specific grating
orders. However, since the CCD energy resolution is finite, this
    assignment is not perfect. Some photons may fall outside of the
    nominal energy range used for order sorting and thus be lost, others may
    fall into the energy range assigned to a different order. This class
modifies ARF files to correct for the integrated probability that a photon
falls inside the order-sorting region.
Parameters
----------
fraction : float
Fraction (between 0 and 1) of the space between orders that will be
        covered by the extraction region. For a value of 1, order-sorting
regions just touch and each photon will be assigned to exactly one
order.
offset_orders : list of int
Offset from main order that is relevant.
ccd_redist : callable
        Function that returns the Gaussian sigma width of the CCD energy
        resolution, given an input energy.
'''
def __init__(self, fraction=1., **kwargs):
self.fraction = fraction
super().__init__(**kwargs)
@property
def osip_description(self):
return 'OSIPDist' + str(self.fraction)
@u.quantity_input(chan_mid_nominal=u.keV, equivalencies=u.spectral())
def osip_tab(self, chan_mid_nominal, order):
energy = chan_mid_nominal.to(u.keV, equivalencies=u.spectral())
dE = energy / abs(order)
return np.broadcast_to(self.fraction / 2 * dE,
(2, len(chan_mid_nominal)), subok=True)
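# Worked example of the fractional-distance window (numbers are illustrative
# only): neighbouring orders at CCD energy E are separated by dE = E / |order|,
# so a photon detected at 3 keV in order 5 has dE = 0.6 keV; with
# ``fraction = 0.8`` the half-width on each side is 0.8 / 2 * 0.6 keV = 0.24 keV,
# while ``fraction = 1`` makes adjacent order-sorting regions just touch.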
| gpl-3.0 |
michalsenkyr/spark | python/setup.py | 5 | 10243 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.7'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
alberto-antonietti/nest-simulator | pynest/examples/mc_neuron.py | 12 | 7554 | # -*- coding: utf-8 -*-
#
# mc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Multi-compartment neuron example
--------------------------------
Simple example of how to use the three-compartment ``iaf_cond_alpha_mc``
neuron model.
Three stimulation paradigms are illustrated:
- externally applied current, one compartment at a time
- spikes impinging on each compartment, one at a time
- rheobase current injected to soma causing output spikes
Voltage and synaptic conductance traces are shown for all compartments.
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
##############################################################################
# We then extract the receptor types and the list of recordable quantities
# from the neuron model. Receptor types and recordable quantities uniquely
# define the receptor type and the compartment while establishing synaptic
# connections or assigning multimeters.
syns = nest.GetDefaults('iaf_cond_alpha_mc')['receptor_types']
print("iaf_cond_alpha_mc receptor_types: {0}".format(syns))
rqs = nest.GetDefaults('iaf_cond_alpha_mc')['recordables']
print("iaf_cond_alpha_mc recordables : {0}".format(rqs))
###############################################################################
# The simulation parameters are assigned to variables.
nest.SetDefaults('iaf_cond_alpha_mc',
{'V_th': -60.0, # threshold potential
'V_reset': -65.0, # reset potential
't_ref': 10.0, # refractory period
'g_sp': 5.0, # somato-proximal coupling conductance
'soma': {'g_L': 12.0}, # somatic leak conductance
# proximal excitatory and inhibitory synaptic time constants
'proximal': {'tau_syn_ex': 1.0,
'tau_syn_in': 5.0},
'distal': {'C_m': 90.0} # distal capacitance
})
###############################################################################
# The nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
n = nest.Create('iaf_cond_alpha_mc')
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create('multimeter', params={'record_from': rqs, 'interval': 0.1})
nest.Connect(mm, n)
###############################################################################
# We create one current generator per compartment and configure a stimulus
# regime that drives distal, proximal and soma dendrites, in that order.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current.
cgs = nest.Create('dc_generator', 3)
cgs[0].set(start=250.0, stop=300.0, amplitude=50.0) # soma
cgs[1].set(start=150.0, stop=200.0, amplitude=-50.0) # proxim.
cgs[2].set(start=50.0, stop=100.0, amplitude=100.0) # distal
###############################################################################
# Generators are then connected to the correct compartments. Specification of
# the ``receptor_type`` uniquely defines the target compartment and receptor.
nest.Connect(cgs[0], n, syn_spec={'receptor_type': syns['soma_curr']})
nest.Connect(cgs[1], n, syn_spec={'receptor_type': syns['proximal_curr']})
nest.Connect(cgs[2], n, syn_spec={'receptor_type': syns['distal_curr']})
###############################################################################
# We create one excitatory and one inhibitory spike generator per compartment
# and configure a regime that drives distal, proximal and soma dendrites, in
# that order, alternating the excitatory and inhibitory spike generators.
sgs = nest.Create('spike_generator', 6)
sgs[0].spike_times = [600.0, 620.0] # soma excitatory
sgs[1].spike_times = [610.0, 630.0] # soma inhibitory
sgs[2].spike_times = [500.0, 520.0] # proximal excitatory
sgs[3].spike_times = [510.0, 530.0] # proximal inhibitory
sgs[4].spike_times = [400.0, 420.0] # distal excitatory
sgs[5].spike_times = [410.0, 430.0] # distal inhibitory
###############################################################################
# Connect generators to correct compartments in the same way as in case of
# current generator
nest.Connect(sgs[0], n, syn_spec={'receptor_type': syns['soma_exc']})
nest.Connect(sgs[1], n, syn_spec={'receptor_type': syns['soma_inh']})
nest.Connect(sgs[2], n, syn_spec={'receptor_type': syns['proximal_exc']})
nest.Connect(sgs[3], n, syn_spec={'receptor_type': syns['proximal_inh']})
nest.Connect(sgs[4], n, syn_spec={'receptor_type': syns['distal_exc']})
nest.Connect(sgs[5], n, syn_spec={'receptor_type': syns['distal_inh']})
###############################################################################
# Run the simulation for 700 ms.
nest.Simulate(700)
###############################################################################
# Now we set the intrinsic current of soma to 150 pA to make the neuron spike.
n.set({'soma': {'I_e': 150.}})
###############################################################################
# We simulate the network for another 300 ms and retrieve recorded data from
# the multimeter
nest.Simulate(300)
rec = mm.events
###############################################################################
# We create an array with the time points when the quantities were actually
# recorded
t = rec['times']
###############################################################################
# We plot the time traces of the membrane potential and the state of each
# membrane potential for soma, proximal, and distal dendrites (`V_m.s`, `V_m.p`
# and `V_m.d`).
plt.figure()
plt.subplot(211)
plt.plot(t, rec['V_m.s'], t, rec['V_m.p'], t, rec['V_m.d'])
plt.legend(('Soma', 'Proximal dendrite', 'Distal dendrite'),
loc='lower right')
plt.axis([0, 1000, -76, -59])
plt.ylabel('Membrane potential [mV]')
plt.title('Responses of iaf_cond_alpha_mc neuron')
###############################################################################
# Finally, we plot the time traces of the synaptic conductance measured in
# each compartment.
plt.subplot(212)
plt.plot(t, rec['g_ex.s'], 'b-', t, rec['g_ex.p'], 'g-',
t, rec['g_ex.d'], 'r-')
plt.plot(t, rec['g_in.s'], 'b--', t, rec['g_in.p'], 'g--',
t, rec['g_in.d'], 'r--')
plt.legend(('g_ex.s', 'g_ex.p', 'g_ex.d', 'g_in.s', 'g_in.p', 'g_in.d'))
plt.axis([350, 700, 0, 1.15])
plt.xlabel('Time [ms]')
plt.ylabel('Synaptic conductance [nS]')
plt.show()
| gpl-2.0 |
subutai/nupic.research | nupic/research/frameworks/vernon/mixins/gradient_metrics.py | 2 | 12470 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from copy import deepcopy
from pprint import pformat
import matplotlib.pyplot as plt
import numpy as np
import torch
from nupic.research.frameworks.pytorch.hooks import ModelHookManager, TrackGradientsHook
from nupic.research.frameworks.pytorch.model_utils import filter_modules
class GradientMetrics(object):
"""
Mixin for tracking and plotting module gradient metrics during training.
:param config: a dict containing the following
- gradient_metrics_args: a dict containing the following
- include_modules: (optional) a list of module types to track
- include_names: (optional) a list of module names to track e.g.
"features.stem"
- include_patterns: (optional) a list of regex patterns to compare to the
names; for instance, all feature parameters in ResNet
can be included through "features.*"
- plot_freq: (optional) how often to create the plot, measured in training
iterations; defaults to 1
- metrics: a list of metrics options from ["cosine", "dot", "pearson"];
defaults to ["cosine",]
- gradient_values: (optional) one of "real", "sign", "mask".
"real" corresponds to the real values of the gradients,
"sign" corresponds to collecting the sign of the gradients,
"mask" results in a binary mask corresponding to nonzero gradients;
defaults to "real"
- max_samples_to_track: (optional) how many of samples to use for plotting;
only the newest will be used; defaults to 100
Example config:
```
config=dict(
gradient_metrics_args=dict(
include_modules=[torch.nn.Linear, KWinners],
plot_freq=1,
max_samples_to_track=150,
metrics=["dot", "pearson"],
gradient_values="mask",
metric1 = "mask/dot"
metric2 = "sign/pearson"
)
)
```
"""
def setup_experiment(self, config):
super().setup_experiment(config)
# Process config args
gradient_metrics_args = config.get("gradient_metrics_args", {})
self.gradient_metrics_plot_freq, self.gradient_metrics_filter_args, \
self.gradient_metrics_max_samples, self.gradient_metrics = \
process_gradient_metrics_args(
gradient_metrics_args)
# Register hook for tracking hidden activations
named_modules = filter_modules(self.model, **self.gradient_metrics_filter_args)
hook_args = dict(max_samples_to_track=self.gradient_metrics_max_samples)
self.gradient_metric_hooks = ModelHookManager(
named_modules, TrackGradientsHook, hook_type="backward", hook_args=hook_args
)
# Log the names of the modules being tracked
tracked_names = pformat(list(named_modules.keys()))
self.logger.info(f"Tracking gradients for modules: {tracked_names}")
# The targets will be collected in `self.error_loss` in a 1:1 fashion
# to the tensors being collected by the hooks.
self.gradient_metric_targets = torch.tensor([]).long()
def run_epoch(self):
"""
This runs the epoch with the hooks in tracking mode. The resulting gradients
collected by the `TrackGradientsHook` object are plotted by calling a
plotting function.
"""
# Run the epoch with tracking enabled.
with self.gradient_metric_hooks:
results = super().run_epoch()
# The epoch was iterated in `run_epoch` so epoch 0 is really epoch 1 here.
iteration = self.current_epoch + 1
# Calculate metrics, create visualization, and update results dict.
if iteration % self.gradient_metrics_plot_freq == 0:
gradient_stats = self.gradient_metric_hooks.get_statistics()
gradient_metrics_stats = self.calculate_gradient_metrics_stats(
gradient_stats
)
gradient_metric_heatmaps = self.plot_gradient_metric_heatmaps(
gradient_metrics_stats
)
for (name, _, gradient_metric, gradient_value, _, figure) in \
gradient_metric_heatmaps:
results.update({f"{gradient_metric}/{gradient_value}/{name}": figure})
return results
def calculate_gradient_metrics_stats(self, gradients_stats):
"""
This function calculates statistics given the gradients_stats which are being
        tracked by the TrackGradientsHook backward hook.
        This function accesses self.gradient_metrics, which is also a list of
        tuples. Each tuple combines a metric function ("cosine", "dot", "pearson")
and a gradient transformation ("real", "sign", "mask"). By default,
the statistics that are calculated are ("cosine", "mask") and ("cosine",
"real"). ("cosine", "mask") corresponds to the overlap between two different
gradients, and ("cosine", "real") corresponds to the standard cosine
similarity between two gradients.
Args:
gradients_stats: A list of tuples, each of which contains a named module
and its gradients
Returns:
A list of tuples, each of which contains a named module, the statistics
calculated from its gradients, and the metric function/transformation
used on the gradients
"""
all_stats = []
for (name, module, gradients) in gradients_stats:
for gradient_metric, gradient_value in self.gradient_metrics:
# apply gradient value transformation if necessary
if gradient_value == "sign":
gradients = torch.sign(gradients)
elif gradient_value == "mask":
gradients = torch.abs(torch.sign(gradients))
# calculate metric function on transformed gradients
if gradient_metric == "cosine":
stats = [
torch.cosine_similarity(x, y, dim=0)
if not torch.equal(x, y) else 0
for x in gradients
for y in gradients
]
elif gradient_metric == "dot":
stats = [x.dot(y) if not torch.equal(x, y) else 0
for x in gradients
for y in gradients]
elif gradient_metric == "pearson":
stats = [
torch.cosine_similarity(x - x.mean(), y - y.mean(), dim=0)
if not torch.equal(x, y) else 0
for x in gradients
for y in gradients
]
stats = torch.tensor(stats)
gradient_dim = len(gradients)
stats = stats.view(gradient_dim, gradient_dim)
all_stats.append((name, module, gradient_metric, gradient_value, stats))
return all_stats
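    # Worked illustration of the ("cosine", "mask") combination (a sketch,
    # not executed by the pipeline): two per-sample gradients
    #     g1 = [0.3, 0.0, -0.2]  ->  mask [1., 0., 1.]
    #     g2 = [0.0, 0.5, -0.1]  ->  mask [0., 1., 1.]
    # give cosine_similarity(mask1, mask2) = 1 / (sqrt(2) * sqrt(2)) = 0.5,
    # i.e. the masked cosine measures how strongly the sets of nonzero
    # gradient entries overlap between two samples.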
def plot_gradient_metric_heatmaps(self, gradient_metrics_stats):
order_by_class = torch.argsort(self.gradient_metric_targets)
sorted_gradient_metric_targets = self.gradient_metric_targets[order_by_class]
class_change_indices = \
(sorted_gradient_metric_targets - sorted_gradient_metric_targets.roll(
1)).nonzero(as_tuple=True)[0].cpu()
class_labels = [int(_x) for _x in
sorted_gradient_metric_targets.unique()]
class_change_indices_right = class_change_indices.roll(-1).cpu()
class_change_indices_right[-1] = len(sorted_gradient_metric_targets)
tick_locations = (class_change_indices + class_change_indices_right) / 2.0 - 0.5
tick_locations = tick_locations.cpu()
stats_and_figures = []
for (name, module, gradient_metric, gradient_value, stats) in \
gradient_metrics_stats:
stats = stats[order_by_class, :][:, order_by_class]
ax = plt.gca()
max_val = np.abs(stats).max()
img = ax.imshow(stats, cmap="bwr", vmin=-max_val, vmax=max_val)
ax.set_xlabel("class")
ax.set_ylabel("class")
for idx in class_change_indices:
ax.axvline(idx - 0.5, color="black")
ax.axhline(idx - 0.5, color="black")
ax.set_xticks(tick_locations)
ax.set_yticks(tick_locations)
ax.set_xticklabels(class_labels)
ax.set_yticklabels(class_labels)
ax.set_title(f"{gradient_metric}:{gradient_value}:{name}")
plt.colorbar(img, ax=ax)
plt.tight_layout()
figure = plt.gcf()
stats_and_figures.append((name, module, gradient_metric,
gradient_value, stats, figure))
return stats_and_figures
def error_loss(self, output, target, reduction="mean"):
"""
This computes the loss and then saves the targets computed on this loss. This
mixin assumes these targets correspond, in a 1:1 fashion, to the samples seen
in the forward pass.
"""
loss = super().error_loss(output, target, reduction=reduction)
if self.gradient_metric_hooks.tracking:
# Targets were initialized on the cpu which could differ from the
# targets collected during the forward pass.
self.gradient_metric_targets = self.gradient_metric_targets.to(
target.device
)
# Concatenate and discard the older targets.
self.gradient_metric_targets = torch.cat(
[target, self.gradient_metric_targets], dim=0
)
self.gradient_metric_targets = self.gradient_metric_targets[
: self.gradient_metrics_max_samples
]
return loss
def process_gradient_metrics_args(gradient_metric_args):
gradient_metrics_args = deepcopy(gradient_metric_args)
# Collect information about which modules to apply hooks to
include_names = gradient_metrics_args.pop("include_names", [])
include_modules = gradient_metrics_args.pop("include_modules", [])
include_patterns = gradient_metrics_args.pop("include_patterns", [])
filter_args = dict(
include_names=include_names,
include_modules=include_modules,
include_patterns=include_patterns,
)
# Others args
plot_freq = gradient_metrics_args.get("plot_freq", 1)
max_samples = gradient_metrics_args.get("max_samples_to_track", 100)
metrics = gradient_metrics_args.get("metrics", [("cosine", "mask"),
("cosine", "real")])
available_metrics_options = ["cosine", "pearson", "dot"]
available_gradient_values_options = ["real", "sign", "mask"]
assert isinstance(plot_freq, int)
assert isinstance(max_samples, int)
assert isinstance(metrics, list)
assert len(metrics) > 0
assert all([metric in available_metrics_options
and gradient_value in available_gradient_values_options
for metric, gradient_value in metrics])
assert plot_freq > 0
assert max_samples > 0
return plot_freq, filter_args, max_samples, metrics
| agpl-3.0 |
Nyker510/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
clawpack/geoclaw_tutorial_csdms2016 | chile2010a/setplot.py | 1 | 4467 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import numpy as np
import matplotlib.pyplot as plt
#--------------------------
def setplot(plotdata=None):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
from clawpack.visclaw.data import ClawPlotData
from numpy import linspace
if plotdata is None:
plotdata = ClawPlotData()
plotdata.clearfigures() # clear any old figures,axes,items data
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
def fixup(current_data):
import pylab
#addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Surface at %4.2f hours' % t, fontsize=20)
pylab.xticks(fontsize=15)
pylab.yticks(fontsize=15)
#-----------------------------------------
# Figure for surface
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.2
plotitem.pcolor_cmax = 0.2
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [1,1,0]
plotitem.patchedges_show = 1
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
plotitem.patchedges_show = 1
plotaxes.xlimits = [-120,-60]
plotaxes.ylimits = [-60,0]
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300, \
type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
def add_zeroline(current_data):
from pylab import plot, legend, xticks, floor, axis, xlabel
t = current_data.t
gaugeno = current_data.gaugeno
plot(t, 0*t, 'k')
n = int(floor(t.max()/3600.) + 2)
xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])
xlabel('time (hours)')
plotaxes.afteraxes = add_zeroline
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
| bsd-2-clause |
Srisai85/scipy | scipy/interpolate/ndgriddata.py | 45 | 7161 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
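# Minimal usage sketch for NearestNDInterpolator (values are illustrative and
# not part of the test suite):
#
#     >>> points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     >>> values = np.array([1.0, 2.0, 3.0])
#     >>> interp = NearestNDInterpolator(points, values)
#     >>> interp([[0.1, 0.1], [0.9, 0.1]])
#     array([ 1.,  2.])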
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
ankeshanand/masters-thesis | code/model/model.py | 1 | 1915 | from extract_features import create_feature_results_matrix
from sklearn.externals import joblib
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import RandomizedPCA
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import Normalizer
datapath = '/home/ankesh/masters-thesis/data/reviews_Cell_Phones_and_Accessories.json.gz'
X, y = create_feature_results_matrix(datapath)
#print 'Dumping matrices to disk.'
filename_X = 'X.joblib.pkl'
filename_y = 'y.joblib.pkl'
_ = joblib.dump(X, filename_X, compress=9)
_ = joblib.dump(y, filename_y, compress=9)
print 'Loading matrices'
X = joblib.load(filename_X)
y = joblib.load(filename_y)
#X = X.toarray()
print X.shape
from sklearn import svm
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition, pipeline, metrics, grid_search
#print 'Truncated SVD'
#pca = RandomizedPCA(n_components=200)
#X_reduced = pca.fit_transform(X)
#print 'Standard Scaler'
#scl = StandardScaler()
#X_scaled = scl.fit_transform(X)
print 'Normalizer'
nom = Normalizer(copy=False)
X_normalized = nom.fit_transform(X)
#svm_model = LinearSVR(C=3)
#clf = pipeline.Pipeline([('svd', svd),('scl', scl),('svm', svm_model)])
#param_grid = {'svm__C': [1,3]}
#print 'Grid Search started'
#model_svm = grid_search.GridSearchCV(estimator = clf, param_grid=param_grid, scoring='mean_squared_error',n_jobs=-1, iid=True, refit=True, cv=10)
#model_svm.fit(X,y)
#print model_svm.best_score_
#print model_svm.best_estimator_
from sklearn.linear_model.stochastic_gradient import SGDRegressor
sgd = SGDRegressor(loss='epsilon_insensitive')
#from sklearn.ensemble import GradientBoostingRegressor
#gbt = GradientBoostingRegressor()
print 'Cross Validation started'
scores = cross_val_score(sgd, X_normalized, y, cv=2, scoring='mean_squared_error', n_jobs=-1)
print scores
print scores.mean()
| mit |
camisatx/pySecMaster | pySecMaster/load_aux_tables.py | 1 | 11260 | import time
from datetime import datetime
import os
import pandas as pd
import psycopg2
from utilities.database_queries import df_to_sql, query_load_table,\
update_load_table
__author__ = 'Josh Schertz'
__copyright__ = 'Copyright (C) 2018 Josh Schertz'
__description__ = 'An automated system to store and maintain financial data.'
__email__ = 'josh[AT]joshschertz[DOT]com'
__license__ = 'GNU AGPLv3'
__maintainer__ = 'Josh Schertz'
__status__ = 'Development'
__url__ = 'https://joshschertz.com/'
__version__ = '1.5.0'
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
class LoadTables(object):
def __init__(self, database, user, password, host, port, tables_to_load,
load_tables='load_tables'):
self.database = database
self.user = user
self.password = password
self.host = host
self.port = port
self.load_to_sql(tables_to_load, load_tables)
@staticmethod
def altered_values(existing_df, new_df):
""" Compare the two provided DataFrames, returning a new DataFrame that
only includes rows from the new_df that are different from the
existing_df.
:param existing_df: DataFrame of the existing values
        :param new_df: DataFrame of the new values
:return: DataFrame with the altered/new values
"""
# DataFrame with the similar values from both the existing_df and the
# new_df.
combined_df = pd.merge(left=existing_df, right=new_df, how='inner',
on=list(new_df.columns.values))
# In a new DataFrame, only keep the new_df rows that did NOT have a
# match to the existing_df
id_col_name = list(new_df.columns.values)[0]
altered_df = new_df[~new_df[id_col_name].isin(combined_df[id_col_name])]
return altered_df
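    # Worked sketch of ``altered_values`` (toy frames with assumed columns):
    #     existing_df              new_df
    #     id  ticker               id  ticker
    #     1   AAPL                 1   AAPL
    #     2   MSFT                 2   MSFT CORP
    #                              3   GOOG
    # The inner merge keeps only the row identical in both frames (1, AAPL);
    # rows of new_df whose id is absent from that merge are returned, so
    # altered_df holds the changed row (2, MSFT CORP) and the new row
    # (3, GOOG) -- exactly the rows needing an UPDATE or INSERT downstream.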
def find_tsid(self, table_df):
""" This only converts the stock's ticker to it's respective symbol_id.
This requires knowing the ticker, the exchange and data vendor.
:param table_df: DataFrame with the ticker and index
:return: DataFrame with symbol_id's instead of tickers
"""
try:
conn = psycopg2.connect(database=self.database, user=self.user,
password=self.password, host=self.host,
port=self.port)
with conn:
cur = conn.cursor()
# Determines if the quandl_codes table is empty? Stop if it is.
cur.execute('SELECT q_code FROM quandl_codes LIMIT 1')
if not cur.fetchall():
print('The quandl_codes table is empty. Run the code to '
'download the Quandl Codes and then run this again.')
else:
table_df = self.find_symbol_id_process(table_df, cur)
return table_df
except psycopg2.Error as e:
            print('Error when trying to retrieve data from the %s database '
                  'in LoadTables.find_tsid' % self.database)
print(e)
@staticmethod
def find_symbol_id_process(table_df, cur):
""" Finds the ticker's symbol_id. If the table provided has an exchange
column, then the ticker and exchange will be used to find the
symbol_id. The result should be a perfect match to the quandl_codes
table. If an exchange column doesn't exist, then only the ticker will
be used, along with an implied US exchange. Thus, only tickers traded
on US exchanges will have their symbol_id's found. A way around this is
to provide the exchange in the load file.
:param table_df: A DataFrame with each row a ticker plus extra items
:param cur: A cursor for navigating the SQL database.
:return: A DataFrame with the original ticker replaced with a symbol_id
"""
if 'exchange' in table_df.columns:
# ToDo: Find a new source for the tickers table
cur.execute("""SELECT symbol_id, component, data
FROM quandl_codes""")
data = cur.fetchall()
q_codes_df = pd.DataFrame(data, columns=['symbol_id', 'ticker',
'exchange'])
q_codes_df.drop_duplicates('symbol_id', inplace=True)
# Match the rows that have the same ticker and exchange
df = pd.merge(table_df, q_codes_df, how='inner',
on=['ticker', 'exchange'])
df = df[['symbol_id', 'ticker', 'exchange', 'sector', 'industry',
'sub_industry', 'currency', 'hq_country', 'created_date',
'updated_date']]
else:
exchanges = ['NYSE', 'NYSEMKT', 'NYSEARCA', 'NASDAQ']
cur.execute("""SELECT symbol_id, component, data
FROM quandl_codes""")
data = cur.fetchall()
q_codes_df = pd.DataFrame(data, columns=['symbol_id', 'ticker',
'exchange'])
q_codes_df.drop_duplicates('symbol_id', inplace=True)
# Match the rows that have the same ticker and exchange
# Broke the merge into two steps, involving an intermediary table
df = pd.merge(table_df, q_codes_df, how='left', on='ticker')
df = df[df['exchange'].isin(exchanges)]
df = df.drop(['ticker', 'exchange'], axis=1)
df.rename(columns={'index': 'stock_index'}, inplace=True)
df = df[['stock_index', 'symbol_id', 'as_of', 'created_date',
'updated_date']]
# ToDo: Implement a way to show the tickers that are not included
return df
def load_to_sql(self, tables_to_load, table_location):
""" The main function that processes and loads the auxiliary data into
the database. For each table listed in the tables_to_load list, their
CSV file is loaded and the data moved into the SQL database. If the
table is for indices, the CSV data is passed to the find_symbol_id
        function, where the ticker is replaced with its respective symbol_id.
:param tables_to_load: List of strings
:param table_location: String of the directory for the load tables
:return: Nothing. Data is just loaded into the SQL database.
"""
start_time = time.time()
for table, query in tables.items():
if table in tables_to_load:
try:
file = os.path.abspath(os.path.join(table_location,
table + '.csv'))
table_df = pd.read_csv(file, encoding='ISO-8859-1')
except Exception as e:
print('Unable to load the %s csv load file. Skipping it' %
table)
print(e)
continue
if table == 'indices' or table == 'tickers':
# ToDo: Re-implement these tables; need symbol_id
print('Unable to process indices and tickers table '
'since there is no system to create a unique '
'symbol_id for each item.')
continue
# # Removes the column that has the company's name
# table_df.drop('ticker_name', 1, inplace=True)
# # Finds the tsid for each ticker
# table_df = self.find_tsid(table_df)
# if table == 'tickers':
# table_df.to_csv('load_tables/tickers_df.csv',
# index=False)
# Retrieve any existing values for this table
existing_df = query_load_table(
database=self.database, user=self.user,
password=self.password, host=self.host, port=self.port,
table=table)
# Find the values that are different between the two DataFrames
altered_df = self.altered_values(
existing_df=existing_df, new_df=table_df)
altered_df.insert(len(altered_df.columns), 'created_date',
datetime.now().isoformat())
altered_df.insert(len(altered_df.columns), 'updated_date',
datetime.now().isoformat())
# Get the id column for the current table (first column)
id_col_name = list(altered_df.columns.values)[0]
# Separate out the new and updated values from the altered_df
new_df = (altered_df[~altered_df[id_col_name].
isin(existing_df[id_col_name])])
updated_df = (altered_df[altered_df[id_col_name].
isin(existing_df[id_col_name])])
# Update all modified values within the database
update_load_table(database=self.database, user=self.user,
password=self.password, host=self.host,
port=self.port, values_df=updated_df,
table=table)
# Append all new values to the database
df_to_sql(database=self.database, user=self.user,
password=self.password, host=self.host,
port=self.port, df=new_df, sql_table=table,
exists='append', item=table)
print('Loaded %s into the %s database' %
(table, self.database))
load_tables_excluded = [table for table in tables_to_load
if table not in tables.keys()]
if load_tables_excluded:
print('Unable to load the following tables: %s' %
(", ".join(load_tables_excluded)))
print("If the CSV file exists, make sure it's name matches the "
"name in the tables dictionary.")
print('Finished loading all selected tables taking %0.1f seconds'
% (time.time() - start_time))
# NOTE: make sure the table name (dict key) matches the csv load file name
tables = {
'data_vendor': '(%s,%s,%s,%s,%s,%s,%s,%s)',
'exchanges': '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
'tickers': '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
'indices': '(NULL,%s,%s,%s,%s,%s,%s)',
}
| agpl-3.0 |
fspaolo/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 5 | 1650 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
by manually adding non-linear features. Kernel methods extend this idea and can
induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn.linear_model import Ridge
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
pl.plot(x_plot, f(x_plot), label="ground truth")
pl.scatter(x, y, label="training points")
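# Editorial note: np.vander as used below orders columns by decreasing power,
# e.g. np.vander([2, 3], 3) -> [[4, 2, 1], [9, 3, 1]].  That is the matrix
# described in the docstring up to column order, which does not change the
# ridge fit.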
for degree in [3, 4, 5]:
ridge = Ridge()
ridge.fit(np.vander(x, degree + 1), y)
pl.plot(x_plot, ridge.predict(np.vander(x_plot, degree + 1)),
label="degree %d" % degree)
pl.legend(loc='lower left')
pl.show()
| bsd-3-clause |
jereze/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
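# l1_min_c returns the smallest C for which the L1-penalised model keeps at
# least one non-zero coefficient, so the grid below spans three decades
# starting from that threshold.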
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Borillion/mplh5canvas | tests/test.py | 4 | 6595 | #!/usr/bin/python
# The MPLH5Canvas test suite.
#
# Basically this runs against a directory of matplotlib examples (try examples/api or examples/pylab in the matplotlib source tree)
# and produces a test.html file in ./output that when shown in a browser displays a MPLH5Canvas, PNG and SVG rendering for each
# example in the test set.
#
# This allows rapid fault finding through direct comparisons with known good backends...
from optparse import OptionParser
import os
import sys
parser = OptionParser()
parser.add_option("-d", "--dir", type="string", default=".",help="Specify the directory containing examples to be tested. [default=%default]")
parser.add_option("-f", "--file", type="string", default=None, help="Specify a single file to test. Overrides any directory specified. [default=%default]")
parser.add_option("-w","--wildcard", type="string", default="*", help="Match the filenames to use. e.g. * for all, image_ for all image demos. [default=%default]")
parser.add_option("-c", "--crash", action="store_true", default=False, help="Do not catch script exceptions. [default=%default]")
(options, args) = parser.parse_args()
# Matplotlib examples that should be excluded as they bork things.
exclusions = [# mpl_examples/pylab_examples
'__init__.py', 'customize_rc.py', 'to_numeric.py',
'dannys_example.py', # uses TeX - which borks matplotlib if you don't have TeX installed
'demo_tight_layout.py', # expects Canvas.get_renderer method, then falls back to Agg backend at very inopportune moment
'hexbin_demo.py', 'hexbin_demo2.py', # seems to create an infinite length polygon...
'pcolor_demo.py', 'pcolor_log.py', # what the heck is this rubbish. imshow ftw...
'scatter_profile.py','quadmesh_demo.py', # very slow for now
'tex_unicode_demo.py','tex_demo.py', # uses TeX
'usetex_fonteffects.py','usetex_demo.py','usetex_baseline_test.py', # uses TeX
]
files = []
p = os.listdir(options.dir + os.sep)
if options.wildcard == "*": options.wildcard = ""
while p:
x = p.pop()
if x != sys.argv[0] and x.endswith(".py") and x not in exclusions and x.startswith(options.wildcard):
files.append(x)
if options.file:
options.dir = os.path.dirname(options.file)
files = [os.path.basename(options.file)]
print "Dir:",options.dir,",Files:",files
if files == []:
print "No .py files found in the specified directory (%s)" % options.dir
sys.exit(0)
import matplotlib
import mplh5canvas.backend_h5canvas
mplh5canvas.backend_h5canvas._test = True
mplh5canvas.backend_h5canvas._quiet = True
# The 'use' command has to happen *before* pylab is touched
matplotlib.use('module://mplh5canvas.backend_h5canvas')
from pylab import savefig, gcf, switch_backend
html = "<html><head><script>function resize_canvas(id, w, h) { } var id=-1; var ax_bb = new Array(); var native_w = new Array(); var native_h = new Array();</script></head><body><table>"
html += "<tr><th>File<th>H5 Canvas<th>PNG<th>SVG</tr>"
thtml = "<html><head><body><table><tr><th>File<th>H5 Canvas (PNG from Chrome 4.0 OSX)<th>PNG</tr>"
files.sort()
for count, filename in enumerate(files):
print "Running %s\n" % filename
html += "<tr><th id='name_" + str(count) + "'>" + filename
thtml += "<tr><th id='name_" + str(count) + "'>" + filename
try:
execfile(os.path.join(options.dir, filename))
f = gcf()
f.canvas.draw()
except Exception, e:
print "Failed to run script %s. (%s)" % (filename, str(e))
html += "<td>Failed to execute script.<td>Failed to execute script.<td>Failed to execute script.</tr>"
thtml += "<td>Failed to execute script.<td>Failed to execute script.<td>Failed to execute script.</tr>"
if options.crash:
raise
else:
continue
f = gcf()
f.canvas.draw(ctx_override="c_" + str(count))
w, h = f.get_size_inches() * f.get_dpi()
png_filename = filename[:-2] + "png"
try:
html += "<td><canvas width='%dpx' height='%dpx' id='canvas_%d'>" % (w, h, count,)
html += "\n<script>var c_%d = document.getElementById('canvas_%d').getContext('2d');\n" % (count, count)
html += f.canvas._frame_extra + "\n" + f.canvas._header + "\n" + f.canvas._frame
html += "\nframe_header();\n"
html += "\n</script></canvas>"
thtml += "<td><img src='%s' width='%dpx' height='%dpx' />" % ("h5canvas_" + png_filename, w, h)
except Exception, e:
print "Failed to create Canvas for %s. (%s)" % (filename, str(e))
html += "<td>Failed to create Canvas"
thtml += "<td>Failed to create Canvas"
if options.crash:
raise
try:
f.canvas.print_png(os.path.join(".", "output", png_filename), dpi=f.dpi)
html += "<td><img src='%s' width='%dpx' height='%dpx' />" % (png_filename, w, h)
thtml += "<td><img src='%s' width='%dpx' height='%dpx' />" % (png_filename, w, h)
except Exception, e:
print "Failed to create PNG for %s. (%s)" % (filename, str(e))
html += "<td>Failed to create PNG"
thtml += "<td>Failed to create PNG"
if options.crash:
raise
try:
svg_filename = filename[:-2] + "svg"
f.canvas.print_svg(os.path.join(".", "output", svg_filename), dpi=f.dpi)
html += "<td><img src='%s' width='%dpx' height='%dpx' />" % (svg_filename, w, h)
except Exception, e:
print "Failed to create SVG for %s. (%s)" % (filename, str(e))
html += "<td>Failed to create SVG"
if options.crash:
raise
switch_backend('module://mplh5canvas.backend_h5canvas')
html += "</tr>"
thtml += "</tr>"
print "Finished processing files..."
ip = mplh5canvas.backend_h5canvas.h5m._external_ip()
html += "</table><script> var total_plots = " + str(count) + "; "
pi = """
function connect() {
s = new WebSocket("ws://%s:8123");
}
function put_images() {
for (var i=0; i<total_plots+1;i++) {
try {
s.send(document.getElementById("name_"+i).innerText.split(".py")[0] + ".png " + document.getElementById("canvas_"+i).toDataURL());
} catch (err) {}
}
}""" % (ip,)
html += pi +"</script><input type='button' onclick='put_images()' value='Put Images to server'>"
html += "<input type='button' onclick='connect()' value='Connect'></body></html>"
thtml += "</table></body></html>"
f = open(os.path.join("output", "test.html"), "w")
f.write(html)
f.close()
f = open(os.path.join("output", "test_rendered.html"), "w")
f.write(thtml)
f.close()
| bsd-3-clause |
NP-Omix/BioCompass | BioCompass/BioCompass.py | 2 | 11128 |
import os
import re
import time
import json
from collections import OrderedDict
import pkg_resources
import pandas as pd
from Bio import SeqIO
from Bio import Entrez
Entrez.email = "testing@ucsd.edu"
def untag(rule):
return re.sub('\?P<.*?>', '', rule)
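# For reference, untag() strips the names from named groups so that the same
# sub-pattern can be embedded several times without duplicate group names,
# e.g. untag(r'(?P<word>\w+)-(?P<num>\d+)') -> r'(\w+)-(\d+)'.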
def parse_antiSMASH(content):
""" Parse antiSMASH output
"""
rule_table_genes = r"""
(?P<subject_gene> \w+ \"?) \t
\w+ \t
(?P<location_start> \d+) \t
(?P<location_end> \d+) \t
(?P<strands> [+|-]) \t
(?P<product> .*) \n
"""
rule_table_blasthit = r"""
(?P<query_gene> \w+ )\"? \t
(?P<subject_gene> \w+ )\"? \t
(?P<identity> \d+) \t
(?P<blast_score> \d+) \t
(?P<coverage> \d+(?:\.\d+)?) \t
(?P<evalue> \d+\.\d+e[+|-]\d+) \t
\n
"""
rule_query_cluster = r"""
(?P<query_gene> \w+) \s+
(?P<location_start> \d+) \s
(?P<location_end> \d+) \s
(?P<strands> [+|-]) \s
(?P<product> \w+ (?:\s \w+)?) \s* \n+
"""
rule_detail = r"""
>>\n
(?P<id>\d+) \. \s+
(?P<cluster_subject> (?P<locus>\w+)_(?P<cluster>\w+)) \n
Source: \s+ (?P<source>.+?) \s* \n
Type: \s+ (?P<type>.+) \s* \n
Number\ of\ proteins\ with\ BLAST\ hits\ to\ this\ cluster:\ (?P<n_hits> \d+ ) \n
Cumulative\ BLAST\ score:\ (?P<cum_BLAST_score> \d+ )
\n \n
Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ subject\ cluster:\n
(?P<TableGenes>
(
""" + untag(rule_table_genes) + r"""
)+
)
\n
Table\ of\ Blast\ hits\ \(query\ gene,\ subject\ gene,\ %identity,\ blast\ score,\ %coverage,\ e-value\): \n
(?P<BlastHit>
(\w+ \t \w+ \"? \t \d+ \t \d+ \t \d+\.\d+ \t \d+\.\d+e[+|-]\d+ \t \n)+
)
\n+
"""
rule = r"""
^
ClusterBlast\ scores\ for\ (?P<target>.*)\n+
Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ query\ cluster:\n+
(?P<QueryCluster>
(
""" + untag(rule_query_cluster) + r"""
)+
)
\n \n+
Significant \ hits:\ \n
(?P<SignificantHits>
(\d+ \. \ \w+ \t .* \n+)+
)
\n \n
(?P<Details>
Details:\n\n
(
""" + untag(rule_detail) + r"""
)+
)
\n*
$
"""
parsed = re.search(rule, content, re.VERBOSE).groupdict()
output = {}
for k in ['target', 'QueryCluster', 'SignificantHits']:
output[k] = parsed[k]
QueryCluster = OrderedDict()
for k in re.search(
rule_query_cluster, parsed['QueryCluster'],
re.VERBOSE).groupdict().keys():
QueryCluster[k] = []
for row in re.finditer(
rule_query_cluster, parsed['QueryCluster'], re.VERBOSE):
row = row.groupdict()
for k in row:
QueryCluster[k].append(row[k])
output['QueryCluster'] = QueryCluster
output['SignificantHits'] = OrderedDict()
for row in re.finditer(
r"""(?P<id>\d+) \. \ (?P<cluster_subject> (?P<locus>\w+)_(?P<locus_cluster>\w+)) \t (?P<description>.*) \n+""", parsed['SignificantHits'], re.VERBOSE):
hit = row.groupdict()
cs = hit['cluster_subject']
if cs not in output['SignificantHits']:
output['SignificantHits'][cs] = OrderedDict()
for v in ['id', 'description', 'locus', 'locus_cluster']:
output['SignificantHits'][cs][v] = hit[v]
for block in re.finditer(rule_detail, parsed['Details'], re.VERBOSE):
block = dict(block.groupdict())
content = block['TableGenes']
block['TableGenes'] = OrderedDict()
for k in re.findall('\(\?P<(.*?)>', rule_table_genes):
block['TableGenes'][k] = []
for row in re.finditer(rule_table_genes, content, re.VERBOSE):
row = row.groupdict()
for k in row:
block['TableGenes'][k].append(row[k])
content = block['BlastHit']
block['BlastHit'] = OrderedDict()
for k in re.findall('\(\?P<(.*?)>', rule_table_blasthit):
block['BlastHit'][k] = []
for row in re.finditer(rule_table_blasthit, content, re.VERBOSE):
row = row.groupdict()
for k in row:
block['BlastHit'][k].append(row[k])
for k in block:
output['SignificantHits'][block['cluster_subject']][k] = block[k]
return output
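# Editorial note: the parser above returns a plain dict with 'target',
# 'QueryCluster' and 'SignificantHits'; each significant hit carries its
# 'TableGenes' and 'BlastHit' blocks as OrderedDicts of column lists, which
# antiSMASH_to_dataFrame() below flattens into a single DataFrame.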
def antiSMASH_to_dataFrame(content):
""" Extract an antiSMASH file as a pandas.DataFrame
"""
parsed = parse_antiSMASH(content)
output = pd.DataFrame()
for cs in parsed['SignificantHits']:
clusterSubject = parsed['SignificantHits'][cs].copy()
df = pd.merge(
pd.DataFrame(clusterSubject['BlastHit']),
pd.DataFrame(clusterSubject['TableGenes']),
on='subject_gene', how='outer')
del(clusterSubject['BlastHit'])
del(clusterSubject['TableGenes'])
for v in clusterSubject:
df[v] = clusterSubject[v]
output = output.append(df, ignore_index=True)
return output
class antiSMASH_file(object):
""" A class to handle antiSMASH file output.
"""
def __init__(self, filename):
self.data = {}
self.load(filename)
def __getitem__(self, item):
return self.data[item]
def keys(self):
return self.data.keys()
def load(self, filename):
self.data = {}
with open(filename, 'r') as f:
parsed = parse_antiSMASH(f.read())
for v in parsed:
self.data[v] = parsed[v]
def efetch_hit(term, seq_start, seq_stop):
""" Fetch the relevant part of a hit
"""
db = "nucleotide"
maxtry = 3
ntry = -1
downloaded = False
    while not downloaded and (ntry <= maxtry):
ntry += 1
try:
handle = Entrez.esearch(db=db, term=term)
record = Entrez.read(handle)
assert len(record['IdList']) == 1, \
"Sorry, I'm not ready to handle more than one record"
handle = Entrez.efetch(db=db, rettype="gb", retmode="text",
id=record['IdList'][0],
seq_start=seq_start, seq_stop=seq_stop)
content = handle.read()
downloaded = True
        except:
            nap = ntry * 3
            print "Failed to download %s. I'll take a nap of %s seconds " \
                "and try again." % (term, nap)
            time.sleep(nap)
return content
def download_hits(filename, output_path):
""" Download the GenBank block for all hits by antiSMASH
"""
c = antiSMASH_file(filename)
for cs in c['SignificantHits'].keys():
locus = c['SignificantHits'][cs]['locus']
table_genes = c['SignificantHits'][cs]['TableGenes']
filename_out = os.path.join(
output_path,
"%s_%s-%s.gbk" % (locus,
min(table_genes['location_start']),
max(table_genes['location_end'])))
if os.path.isfile(filename_out):
print "Already downloaded %s" % filename_out
else:
print "Requesting cluster_subject: %s, start: %s, end: %s" % (
locus,
min(table_genes['location_start']),
max(table_genes['location_end']))
content = efetch_hit(
term=locus,
seq_start=min(table_genes['location_start']),
seq_stop=max(table_genes['location_end']))
print "Saving %s" % filename_out
with open(filename_out, 'w') as f:
f.write(content)
import urlparse
import urllib2
import tempfile
import tarfile
import os
def download_mibig(outputdir, version='1.3'):
""" Download and extract MIBiG files into outputdir
"""
assert version in ['1.0', '1.1', '1.2', '1.3'], \
"Invalid version of MIBiG"
server = 'http://mibig.secondarymetabolites.org'
filename = "mibig_gbk_%s.tar.gz" % version
url = urlparse.urljoin(server, filename)
with tempfile.NamedTemporaryFile(delete=True) as f:
u = urllib2.urlopen(url)
f.write(u.read())
f.file.flush()
tar = tarfile.open(f.name)
tar.extractall(path=outputdir)
tar.close()
# MIBiG was packed with strange files ._*gbk. Let's remove it
for f in [f for f in os.listdir(outputdir) if f[:2] == '._']:
os.remove(os.path.join(outputdir, f))
#def gbk2tablegen(gb_file, strain_id=None):
#def cds_from_gbk(gb_file, strain_id=None):
def cds_from_gbk(gb_file):
gb_record = SeqIO.read(open(gb_file,"rU"), "genbank")
#if strain_id is not None:
# gb_record.id = strain_id
output = pd.DataFrame()
sign = lambda x: '+' if x > 0 else '-'
for feature in gb_record.features:
if feature.type == "CDS":
tmp = {}
tmp = {'BGC': gb_record.id,
'locus_tag': feature.qualifiers['locus_tag'][0],
'start': feature.location.start.position,
'stop': feature.location.end.position,
'strand': sign(feature.location.strand) }
if 'note' in feature.qualifiers:
for note in feature.qualifiers['note']:
product = re.search( r"""smCOG: \s (?P<product>.*?) \s+ \(Score: \s* (?P<score>.*); \s* E-value: \s (?P<e_value>.*?)\);""", note, re.VERBOSE)
if product is not None:
product = product.groupdict()
product['score'] = float(product['score'])
product['e_value'] = float(product['e_value'])
for p in product:
tmp[p] = product[p]
output = output.append(pd.Series(tmp), ignore_index=True)
return output
def find_category_from_product(df):
subcluster = json.loads(
pkg_resources.resource_string(
__name__, 'subcluster_dictionary.json'))
def get_category(product):
for s in subcluster:
if re.search(s, product):
return subcluster[s]
return 'hypothetical'
idx = df['product'].notnull()
df['category'] = df.loc[idx, 'product'].apply(get_category)
df['category'].fillna('hypothetical', inplace=True)
return df
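# Illustrative sketch: get_category() looks the smCOG product string up
# against the regular-expression keys of the bundled
# subcluster_dictionary.json and returns the matching category, falling back
# to 'hypothetical' when nothing matches or the product is missing.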
def get_hits(filename, criteria='cum_BLAST_score'):
"""
Reproduces original Tiago's code: table_1_extender.py
In the future allow different criteria. Right now it takes
from the very first block, which has the highest Cumulative
BLAST.
"""
with open(filename) as f:
df = antiSMASH_to_dataFrame(f.read())
df.dropna(subset=['query_gene'], inplace=True)
df.sort_values(by=criteria, ascending=False, na_position='last',
inplace=True)
return df.groupby('query_gene', as_index=False).first()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 19 | 2844 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
"""Tests the Silhouette Coefficient. """
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
"""Assert Silhouette Coefficient != nan when there is 1 sample in a class.
This tests for the condition that caused issue 960.
"""
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
"""Assert 1 < n_labels < n_samples"""
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
bikong2/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` equal to the product of the axis lengths
        (i.e. at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
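# Small worked example of the helper above: for
#   X = np.array([[0., 10.], [5., 20.], [10., 30.]])
# and grid_resolution=2, each column contributes a two-point axis between its
# 5th and 95th percentiles, and the returned grid is their cartesian product
# (4 rows, 2 columns).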
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
esiivola/GPYgradients | GPy/plotting/matplot_dep/defaults.py | 7 | 3838 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from matplotlib import cm
from .. import Tango
'''
This file is for defaults for the gpy plot, specific to the plotting library.
Create a kwargs dictionary with the right name for the plotting function
you are implementing. If you do not provide defaults, the default behaviour of
the plotting library will be used.
In the code, always use plotting.gpy_plots.defaults to get the defaults, as
it gives back an empty default when defaults are not defined.
'''
# Data plots:
data_1d = dict(lw=1.5, marker='x', color='k')
data_2d = dict(s=35, edgecolors='none', linewidth=0., cmap=cm.get_cmap('hot'), alpha=.5)
inducing_1d = dict(lw=0, s=500, color=Tango.colorsHex['darkRed'])
inducing_2d = dict(s=17, edgecolor='k', linewidth=.4, color='white', alpha=.5, marker='^')
inducing_3d = dict(lw=.3, s=500, color=Tango.colorsHex['darkRed'], edgecolor='k')
xerrorbar = dict(color='k', fmt='none', elinewidth=.5, alpha=.5)
yerrorbar = dict(color=Tango.colorsHex['darkRed'], fmt='none', elinewidth=.5, alpha=.5)
# GP plots:
meanplot_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=2)
meanplot_2d = dict(cmap='hot', linewidth=.5)
meanplot_3d = dict(linewidth=0, antialiased=True, cstride=1, rstride=1, cmap='hot', alpha=.3)
samples_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=.3)
samples_3d = dict(cmap='hot', alpha=.1, antialiased=True, cstride=1, rstride=1, linewidth=0)
confidence_interval = dict(edgecolor=Tango.colorsHex['darkBlue'], linewidth=.5, color=Tango.colorsHex['lightBlue'],alpha=.2)
density = dict(alpha=.5, color=Tango.colorsHex['lightBlue'])
# GPLVM plots:
data_y_1d = dict(linewidth=0, cmap='RdBu', s=40)
data_y_1d_plot = dict(color='k', linewidth=1.5)
# Kernel plots:
ard = dict(edgecolor='k', linewidth=1.2)
# Input plots:
latent = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
gradient = dict(aspect='auto', cmap='RdBu', interpolation='nearest', alpha=.7)
magnification = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
latent_scatter = dict(s=20, linewidth=.2, edgecolor='k', alpha=.9)
annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
| bsd-3-clause |
oaastest/Azure-MachineLearning-ClientLibrary-Python | setup.py | 1 | 2428 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#--------------------------------------------------------------------------
from setuptools import setup
# To build:
# python setup.py sdist
# python setup.py bdist_wheel
#
# To install:
# python setup.py install
#
# To register (only needed once):
# python setup.py register
#
# To upload:
# python setup.py sdist upload
# python setup.py bdist_wheel upload
setup(
name='azureml',
version='0.2.2',
description='Microsoft Azure Machine Learning Python client library',
license='MIT License',
author='Microsoft Corporation',
author_email='ptvshelp@microsoft.com',
url='https://github.com/Azure/Azure-MachineLearning-ClientLibrary-Python',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License',
],
packages=['azureml'],
install_requires=[
'python-dateutil',
'requests',
'pandas',
]
)
| mit |
ndingwall/scikit-learn | examples/cluster/plot_coin_segmentation.py | 17 | 2948 | """
================================================
Segmenting the picture of greek coins in regions
================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
import skimage
from skimage.data import coins
from skimage.transform import rescale
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.fixes import parse_version
# these were introduced in skimage-0.14
if parse_version(skimage.__version__) >= parse_version('0.14'):
rescale_params = {'anti_aliasing': False, 'multichannel': False}
else:
rescale_params = {}
# load the coins as a numpy array
orig_coins = coins()
# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(smoothened_coins, 0.2, mode="reflect",
**rescale_params)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(rescaled_coins)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 10
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
# %%
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=42)
t1 = time.time()
labels = labels.reshape(rescaled_coins.shape)
plt.figure(figsize=(5, 5))
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l,
colors=[plt.cm.nipy_spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause |
xubenben/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
z0rkuM/stockbros | data_generation/indicators/indicatorslib.py | 1 | 1283 | import pandas as pd
import numpy as np
def rsi(close, window_length):
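    # Wilder-style RSI: exponentially weighted averages of gains and losses
    # over window_length, combined as RSI = 100 - 100 / (1 + RS) with
    # RS = avg_gain / avg_loss.  (pd.stats.moments.ewma is the old pre-0.18
    # pandas spelling of Series.ewm(...).mean().)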
delta = close.diff()[1:]
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
roll_up1 = pd.stats.moments.ewma(up, window_length)
roll_down1 = pd.stats.moments.ewma(down.abs(), window_length)
RS1 = roll_up1 / roll_down1
RSI1 = 100.0 - (100.0 / (1.0 + RS1))
return RSI1
def alternation(diffs):
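    # Share (in percent) of consecutive differences whose sign flips relative
    # to the previous difference, i.e. how often the series alternates
    # direction.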
count = 0
for i in range(len(diffs)):
if i > 0 and ((diffs[i] > 0 and diffs[i-1] <= 0) or (diffs[i] < 0 and diffs[i-1] >= 0)):
count = count + 1
return (count * 100)/(len(diffs) - 1)
def atr(dataframe):
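    # Average True Range: the true range is max(high - low, |high - prev close|,
    # |low - prev close|); the ATR column is then smoothed with Wilder's
    # recursion ATR_t = (13 * ATR_{t-1} + TR_t) / 14, seeded with the mean of
    # the first 14 true ranges.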
shifted = dataframe['Close'].shift()
dataframe['ATR1'] = abs(dataframe['High'] - dataframe['Low'])
dataframe['ATR2'] = abs(dataframe['High'] - shifted)
dataframe['ATR3'] = abs(dataframe['Low'] - shifted)
dataframe['TrueRange'] = dataframe[['ATR1', 'ATR2', 'ATR3']].max(axis=1)
dataframe['ATR'] = dataframe['TrueRange']
dataframe['ATR'].values[0] = np.mean(dataframe['ATR'].values[0:14])
for i in range(len(dataframe['ATR'].values)):
if i > 0:
dataframe['ATR'].values[i] = (dataframe['ATR'].values[i-1] * 13 + dataframe['ATR'].values[i]) / 14
return dataframe
| mit |
wdurhamh/statsmodels | statsmodels/duration/hazard_regression.py | 9 | 60888 | import numpy as np
from statsmodels.base import model
import statsmodels.base.model as base
from statsmodels.tools.decorators import cache_readonly
from scipy.optimize import brent
"""
Implementation of proportional hazards regression models for duration
data that may be censored ("Cox models").
References
----------
T Therneau (1996). Extending the Cox model. Technical report.
http://www.mayo.edu/research/documents/biostat-58pdf/DOC-10027288
G Rodriguez (2005). Non-parametric estimation in survival models.
http://data.princeton.edu/pop509/NonParametricSurvival.pdf
B Gillespie (2006). Checking the assumptions in the Cox proportional
hazards model.
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf
"""
_predict_docstring = """
Returns predicted values from the proportional hazards
regression model.
Parameters
----------
params : array-like
The proportional hazards model parameters.
exog : array-like
Data to use as `exog` in forming predictions. If not
provided, the `exog` values from the model used to fit the
data are used.%(cov_params_doc)s
endog : array-like
Duration (time) values at which the predictions are made.
Only used if pred_type is either 'cumhaz' or 'surv'. If
using model `exog`, defaults to model `endog` (time), but
may be provided explicitly to make predictions at
alternative times.
strata : array-like
A vector of stratum values used to form the predictions.
Not used (may be 'None') if pred_type is 'lhr' or 'hr'.
If `exog` is None, the model stratum values are used. If
`exog` is not None and pred_type is 'surv' or 'cumhaz',
stratum values must be provided (unless there is only one
stratum).
offset : array-like
Offset values used to create the predicted values.
pred_type : string
If 'lhr', returns log hazard ratios, if 'hr' returns
hazard ratios, if 'surv' returns the survival function, if
'cumhaz' returns the cumulative hazard function.
Returns
-------
A bunch containing two fields: `predicted_values` and
`standard_errors`.
Notes
-----
Standard errors are only returned when predicting the log
hazard ratio (pred_type is 'lhr').
Types `surv` and `cumhaz` require estimation of the cumulative
hazard function.
"""
_predict_cov_params_docstring = """
cov_params : array-like
The covariance matrix of the estimated `params` vector,
used to obtain prediction errors if pred_type='lhr',
otherwise optional."""
class PHSurvivalTime(object):
def __init__(self, time, status, exog, strata=None, entry=None,
offset=None):
"""
Represent a collection of survival times with possible
stratification and left truncation.
Parameters
----------
time : array_like
The times at which either the event (failure) occurs or
the observation is censored.
status : array_like
Indicates whether the event (failure) occurs at `time`
(`status` is 1), or if `time` is a censoring time (`status`
is 0).
exog : array_like
            The exogenous (covariate) data matrix; cases are rows and
variables are columns.
strata : array_like
Grouping variable defining the strata. If None, all
observations are in a single stratum.
entry : array_like
Entry (left truncation) times. The observation is not
part of the risk set for times before the entry time. If
None, the entry time is treated as being zero, which
gives no left truncation. The entry time must be less
than or equal to `time`.
offset : array-like
An optional array of offsets
"""
# Default strata
if strata is None:
strata = np.zeros(len(time), dtype=np.int32)
# Default entry times
if entry is None:
entry = np.zeros(len(time))
# Parameter validity checks.
n1, n2, n3, n4 = len(time), len(status), len(strata),\
len(entry)
nv = [n1, n2, n3, n4]
if max(nv) != min(nv):
raise ValueError("endog, status, strata, and " +
"entry must all have the same length")
if min(time) < 0:
raise ValueError("endog must be non-negative")
if min(entry) < 0:
raise ValueError("entry time must be non-negative")
# In Stata, this is entry >= time, in R it is >.
if np.any(entry > time):
raise ValueError("entry times may not occur " +
"after event or censoring times")
# Get the row indices for the cases in each stratum
if strata is not None:
stu = np.unique(strata)
#sth = {x: [] for x in stu} # needs >=2.7
sth = dict([(x, []) for x in stu])
for i,k in enumerate(strata):
sth[k].append(i)
stratum_rows = [np.asarray(sth[k], dtype=np.int32) for k in stu]
stratum_names = stu
else:
stratum_rows = [np.arange(len(time)),]
stratum_names = [0,]
# Remove strata with no events
ix = [i for i,ix in enumerate(stratum_rows) if status[ix].sum() > 0]
stratum_rows = [stratum_rows[i] for i in ix]
stratum_names = [stratum_names[i] for i in ix]
# The number of strata
nstrat = len(stratum_rows)
self.nstrat = nstrat
# Remove subjects whose entry time occurs after the last event
# in their stratum.
for stx,ix in enumerate(stratum_rows):
last_failure = max(time[ix][status[ix] == 1])
# Stata uses < here, R uses <=
ii = [i for i,t in enumerate(entry[ix]) if
t <= last_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Remove subjects who are censored before the first event in
# their stratum.
for stx,ix in enumerate(stratum_rows):
first_failure = min(time[ix][status[ix] == 1])
ii = [i for i,t in enumerate(time[ix]) if
t >= first_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Order by time within each stratum
for stx,ix in enumerate(stratum_rows):
ii = np.argsort(time[ix])
stratum_rows[stx] = stratum_rows[stx][ii]
if offset is not None:
self.offset_s = []
for stx in range(nstrat):
self.offset_s.append(offset[stratum_rows[stx]])
else:
self.offset_s = None
# Number of informative subjects
self.n_obs = sum([len(ix) for ix in stratum_rows])
# Split everything by stratum
self.time_s = []
self.exog_s = []
self.status_s = []
self.entry_s = []
for ix in stratum_rows:
self.time_s.append(time[ix])
self.exog_s.append(exog[ix,:])
self.status_s.append(status[ix])
self.entry_s.append(entry[ix])
self.stratum_rows = stratum_rows
self.stratum_names = stratum_names
# Precalculate some indices needed to fit Cox models.
# Distinct failure times within a stratum are always taken to
# be sorted in ascending order.
#
# ufailt_ix[stx][k] is a list of indices for subjects who fail
# at the k^th sorted unique failure time in stratum stx
#
# risk_enter[stx][k] is a list of indices for subjects who
# enter the risk set at the k^th sorted unique failure time in
# stratum stx
#
# risk_exit[stx][k] is a list of indices for subjects who exit
# the risk set at the k^th sorted unique failure time in
# stratum stx
self.ufailt_ix, self.risk_enter, self.risk_exit, self.ufailt =\
[], [], [], []
for stx in range(self.nstrat):
# All failure times
ift = np.flatnonzero(self.status_s[stx] == 1)
ft = self.time_s[stx][ift]
# Unique failure times
uft = np.unique(ft)
nuft = len(uft)
# Indices of cases that fail at each unique failure time
#uft_map = {x:i for i,x in enumerate(uft)} # requires >=2.7
uft_map = dict([(x, i) for i,x in enumerate(uft)]) # 2.6
uft_ix = [[] for k in range(nuft)]
for ix,ti in zip(ift,ft):
uft_ix[uft_map[ti]].append(ix)
# Indices of cases (failed or censored) that enter the
# risk set at each unique failure time.
risk_enter1 = [[] for k in range(nuft)]
for i,t in enumerate(self.time_s[stx]):
ix = np.searchsorted(uft, t, "right") - 1
if ix >= 0:
risk_enter1[ix].append(i)
# Indices of cases (failed or censored) that exit the
# risk set at each unique failure time.
risk_exit1 = [[] for k in range(nuft)]
for i,t in enumerate(self.entry_s[stx]):
ix = np.searchsorted(uft, t)
risk_exit1[ix].append(i)
self.ufailt.append(uft)
self.ufailt_ix.append([np.asarray(x, dtype=np.int32) for x in uft_ix])
self.risk_enter.append([np.asarray(x, dtype=np.int32) for x in risk_enter1])
self.risk_exit.append([np.asarray(x, dtype=np.int32) for x in risk_exit1])
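# ---------------------------------------------------------------------------
# Minimal usage sketch for PHSurvivalTime: the tiny data set and the helper
# name `_phsurvivaltime_demo` below are made up purely for illustration.
def _phsurvivaltime_demo():
    import numpy as np
    time = np.array([2., 3., 3., 5., 8.])
    status = np.array([1, 0, 1, 1, 0])   # 1 = event, 0 = right censored
    exog = np.arange(10.).reshape(5, 2)
    surv = PHSurvivalTime(time, status, exog)
    # One stratum; the unique failure times are 2, 3 and 5.
    print(surv.ufailt[0])
    # ufailt_ix[0][k]: subjects failing at the k-th unique failure time.
    print(surv.ufailt_ix[0])
    # risk_enter / risk_exit: who enters / leaves the risk set at each time.
    print(surv.risk_enter[0])
    print(surv.risk_exit[0])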
class PHReg(model.LikelihoodModel):
"""
Fit the Cox proportional hazards regression model for right
censored data.
Parameters
----------
endog : array-like
The observed times (event or censoring)
exog : 2D array-like
        The covariates or exogenous variables
status : array-like
The censoring status values; status=1 indicates that an
        event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array-like
The entry times, if left truncation occurs
strata : array-like
Stratum labels. If None, all observations are taken to be
in a single stratum.
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
offset : array-like
Array of offset values
missing : string
The method used to handle missing data
Notes
-----
Proportional hazards regression models should not include an
explicit or implicit intercept. The effect of an intercept is
not identified using the partial likelihood approach.
`endog`, `event`, `strata`, `entry`, and the first dimension
of `exog` all must have the same length
"""
def __init__(self, endog, exog, status=None, entry=None,
strata=None, offset=None, ties='breslow',
missing='drop', **kwargs):
# Default is no censoring
if status is None:
status = np.ones(len(endog))
super(PHReg, self).__init__(endog, exog, status=status,
entry=entry, strata=strata,
offset=offset, missing=missing,
**kwargs)
# endog and exog are automatically converted, but these are
# not
if self.status is not None:
self.status = np.asarray(self.status)
if self.entry is not None:
self.entry = np.asarray(self.entry)
if self.strata is not None:
self.strata = np.asarray(self.strata)
if self.offset is not None:
self.offset = np.asarray(self.offset)
self.surv = PHSurvivalTime(self.endog, self.status,
self.exog, self.strata,
self.entry, self.offset)
# TODO: not used?
self.missing = missing
ties = ties.lower()
if ties not in ("efron", "breslow"):
raise ValueError("`ties` must be either `efron` or " +
"`breslow`")
self.ties = ties
@classmethod
def from_formula(cls, formula, data, status=None, entry=None,
strata=None, offset=None, subset=None,
ties='breslow', missing='drop', *args, **kwargs):
"""
Create a proportional hazards regression model from a formula
and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
status : array-like
The censoring status values; status=1 indicates that an
            event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array-like
The entry times, if left truncation occurs
strata : array-like
Stratum labels. If None, all observations are taken to be
in a single stratum.
offset : array-like
Array of offset values
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
missing : string
The method used to handle missing data
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : PHReg model instance
"""
# Allow array arguments to be passed by column name.
if type(status) is str:
status = data[status]
if type(entry) is str:
entry = data[entry]
if type(strata) is str:
strata = data[strata]
if type(offset) is str:
offset = data[offset]
mod = super(PHReg, cls).from_formula(formula, data,
status=status, entry=entry, strata=strata,
offset=offset, subset=subset, ties=ties,
missing=missing, *args, **kwargs)
return mod
def fit(self, groups=None, **args):
"""
Fit a proportional hazards regression model.
Parameters
----------
groups : array-like
Labels indicating groups of observations that may be
dependent. If present, the standard errors account for
this dependence. Does not affect fitted values.
Returns a PHregResults instance.
"""
# TODO process for missing values
if groups is not None:
self.groups = np.asarray(groups)
else:
self.groups = None
if 'disp' not in args:
args['disp'] = False
fit_rslts = super(PHReg, self).fit(**args)
if self.groups is None:
cov_params = fit_rslts.cov_params()
else:
cov_params = self.robust_covariance(fit_rslts.params)
results = PHRegResults(self, fit_rslts.params, cov_params)
return results
def fit_regularized(self, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None,
cnvrg_tol=1e-7, zero_tol=1e-8, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method :
Only the coordinate descent algorithm is implemented.
maxiter : integer
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array-like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
            in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
Returns
-------
A PHregResults object, of the same type returned by `fit`.
Notes
-----
The penalty is the"elastic net" penalty, which
is a convex combination of L1 and L2 penalties.
        The function that is minimized is: .. math::
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
"""
k_exog = self.exog.shape[1]
n_exog = self.exog.shape[0]
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog, dtype=np.float64)
# regularization cannot be used with groups
self.groups = None
# Define starting params
if start_params is None:
params = np.zeros(k_exog, dtype=np.float64)
else:
params = start_params.copy()
# Maybe could be a shallow copy, but just in case...
import copy
surv = copy.deepcopy(self.surv)
# This is the base offset, onto which the effects of
# constrained variables are added.
if self.offset is None:
offset_s_base = [np.zeros(len(x)) for x in surv.stratum_rows]
surv.offset_s = [x.copy() for x in offset_s_base]
else:
offset_s_base = [x.copy() for x in surv.offset_s]
# Create a model instance for optimizing a single variable
model_1var = copy.deepcopy(self)
model_1var.surv = surv
model_1var.ties = self.ties
        # All the negative penalized log-likelihood functions.
def gen_npfuncs(k):
def nploglike(params):
pen = alpha[k]*((1 - L1_wt)*params**2/2 + L1_wt*np.abs(params))
return -model_1var.loglike(np.r_[params]) / n_exog + pen
def npscore(params):
pen_grad = alpha[k]*(1 - L1_wt)*params
return -model_1var.score(np.r_[params])[0] / n_exog + pen_grad
def nphess(params):
pen_hess = alpha[k]*(1 - L1_wt)
return -model_1var.hessian(np.r_[params])[0,0] / n_exog + pen_hess
return nploglike, npscore, nphess
nploglike_funcs = [gen_npfuncs(k) for k in range(len(params))]
# 1-dimensional exog's
exog_s = []
for k in range(k_exog):
ex = [x[:, k][:, None] for x in surv.exog_s]
exog_s.append(ex)
converged = False
btol = 1e-8
params_zero = np.zeros(len(params), dtype=bool)
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we don't try to change it again.
if params_zero[k]:
continue
# Set exog to include only the variable whose effect
# is being estimated.
surv.exog_s = exog_s[k]
# Set the offset to account for the variables that are
# being held fixed.
params0 = params.copy()
params0[k] = 0
for stx in range(self.surv.nstrat):
v = np.dot(self.surv.exog_s[stx], params0)
surv.offset_s[stx] = offset_s_base[stx] + v
params[k] = _opt_1d(nploglike_funcs[k], params[k],
alpha[k]*L1_wt, tol=btol)
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
converged = True
break
# Set approximate zero coefficients to be exactly zero
params *= np.abs(params) >= zero_tol
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog), dtype=np.float64)
if len(ii) > 0:
model = self.__class__(self.endog, self.exog[:, ii],
status=self.status, entry=self.entry,
strata=self.strata, offset=self.offset,
ties=self.ties, missing=self.missing)
rslt = model.fit()
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
rfit = PHRegResults(self, params, cov_params=cov)
rfit.converged = converged
rfit.regularized = True
return rfit
def loglike(self, params):
"""
Returns the log partial likelihood function evaluated at
`params`.
"""
if self.ties == "breslow":
return self.breslow_loglike(params)
elif self.ties == "efron":
return self.efron_loglike(params)
def score(self, params):
"""
Returns the score function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_gradient(params)
elif self.ties == "efron":
return self.efron_gradient(params)
def hessian(self, params):
"""
Returns the Hessian matrix of the log partial likelihood
function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_hessian(params)
else:
return self.efron_hessian(params)
def breslow_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Breslow method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += (linpred[ix] - np.log(xp0)).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def efron_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
# exog and linear predictor for this stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp0f = e_linpred[uft_ix[i]].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += linpred[ix].sum()
m = len(ix)
J = np.arange(m, dtype=np.float64) / m
like -= np.log(xp0 - J*xp0f).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def breslow_gradient(self, params):
"""
Returns the gradient of the log partial likelihood, using the
Breslow method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of subjects in the stratum
strat_ix = surv.stratum_rows[stx]
# Unique failure times in the stratum
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
# exog and linear predictor for the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
# Account for all cases that fail at this point.
ix = uft_ix[i]
grad += (exog_s[ix,:] - xp1 / xp0).sum(0)
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def efron_gradient(self, params):
"""
Returns the gradient of the log partial likelihood evaluated
at `params`, using the Efron method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of cases in the stratum
strat_ix = surv.stratum_rows[stx]
# exog and linear predictor of the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
# Consider all cases that fail at this point.
grad += v.sum(0)
m = len(ixf)
J = np.arange(m, dtype=np.float64) / m
numer = xp1 - np.outer(J, xp1f)
denom = xp0 - np.outer(J, xp0f)
ratio = numer / denom
rsum = ratio.sum(0)
grad -= rsum
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def breslow_hessian(self, params):
"""
Returns the Hessian of the log partial likelihood evaluated at
`params`, using the Breslow method to handle tied times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 += (mat.T * mat * elx[None,:,None]).sum(1)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
hess += m*(xp2 / xp0 - np.outer(xp1, xp1) / xp0**2)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 -= (mat.T * mat * elx[None,:,None]).sum(1)
return -hess
def efron_hessian(self, params):
"""
Returns the Hessian matrix of the partial log-likelihood
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 += (mat.T * mat * elx[None,:,None]).sum(1)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ixf]
xp2f = (mat.T * mat * elx[None,:,None]).sum(1)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
J = np.arange(m, dtype=np.float64) / m
c0 = xp0 - J*xp0f
mat = (xp2[None,:,:] - J[:,None,None]*xp2f) / c0[:,None,None]
hess += mat.sum(0)
mat = (xp1[None, :] - np.outer(J, xp1f)) / c0[:, None]
mat = mat[:, :, None] * mat[:, None, :]
hess -= mat.sum(0)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 -= (mat.T * mat * elx[None,:,None]).sum(1)
return -hess
def robust_covariance(self, params):
"""
Returns a covariance matrix for the proportional hazards model
        regression coefficient estimates that is robust to certain
forms of model misspecification.
Parameters
----------
params : ndarray
The parameter vector at which the covariance matrix is
calculated.
Returns
-------
The robust covariance matrix as a square ndarray.
Notes
-----
This function uses the `groups` argument to determine groups
within which observations may be dependent. The covariance
matrix is calculated using the Huber-White "sandwich" approach.
"""
if self.groups is None:
raise ValueError("`groups` must be specified to calculate the robust covariance matrix")
hess = self.hessian(params)
score_obs = self.score_residuals(params)
# Collapse
grads = {}
for i,g in enumerate(self.groups):
if g not in grads:
grads[g] = 0.
grads[g] += score_obs[i, :]
grads = np.asarray(list(grads.values()))
mat = grads[None, :, :]
mat = mat.T * mat
mat = mat.sum(1)
hess_inv = np.linalg.inv(hess)
cmat = np.dot(hess_inv, np.dot(mat, hess_inv))
return cmat
def score_residuals(self, params):
"""
Returns the score residuals calculated at a given vector of
parameters.
Parameters
----------
params : ndarray
The parameter vector at which the score residuals are
calculated.
Returns
-------
The score residuals, returned as a ndarray having the same
shape as `exog`.
Notes
-----
Observations in a stratum with no observed events have undefined
score residuals, and contain NaN in the returned matrix.
"""
surv = self.surv
score_resid = np.zeros(self.exog.shape, dtype=np.float64)
# Use to set undefined values to NaN.
mask = np.zeros(self.exog.shape[0], dtype=np.int32)
w_avg = self.weighted_covariate_averages(params)
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
strat_ix = surv.stratum_rows[stx]
xp0 = 0.
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
at_risk_ix = set([])
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
at_risk_ix |= set(ix)
xp0 += e_linpred[ix].sum()
atr_ix = list(at_risk_ix)
leverage = exog_s[atr_ix, :] - w_avg[stx][i, :]
# Event indicators
d = np.zeros(exog_s.shape[0])
d[uft_ix[i]] = 1
# The increment in the cumulative hazard
dchaz = len(uft_ix[i]) / xp0
# Piece of the martingale residual
mrp = d[atr_ix] - e_linpred[atr_ix] * dchaz
# Update the score residuals
ii = strat_ix[atr_ix]
score_resid[ii,:] += leverage * mrp[:, None]
mask[ii] = 1
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
at_risk_ix -= set(ix)
xp0 -= e_linpred[ix].sum()
jj = np.flatnonzero(mask == 0)
if len(jj) > 0:
score_resid[jj, :] = np.nan
return score_resid
def weighted_covariate_averages(self, params):
"""
Returns the hazard-weighted average of covariate values for
subjects who are at-risk at a particular time.
Parameters
----------
params : ndarray
Parameter vector
Returns
-------
averages : list of ndarrays
averages[stx][i,:] is a row vector containing the weighted
average values (for all the covariates) of at-risk
            subjects at the i^th largest observed failure time in
stratum `stx`, using the hazard multipliers as weights.
Notes
-----
Used to calculate leverages and score residuals.
"""
surv = self.surv
averages = []
xp0, xp1 = 0., 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
average_s = np.zeros((len(uft_ix), exog_s.shape[1]),
dtype=np.float64)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp1 += np.dot(e_linpred[ix], exog_s[ix, :])
average_s[i, :] = xp1 / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
xp1 -= np.dot(e_linpred[ix], exog_s[ix, :])
averages.append(average_s)
return averages
def baseline_cumulative_hazard(self, params):
"""
Estimate the baseline cumulative hazard and survival
functions.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A list of triples (time, hazard, survival) containing the time
values and corresponding cumulative hazard and survival
function values for each stratum.
Notes
-----
Uses the Nelson-Aalen estimator.
"""
# TODO: some disagreements with R, not the same algorithm but
# hard to deduce what R is doing. Our results are reasonable.
surv = self.surv
rslt = []
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
xp0 = 0.
h0 = np.zeros(nuft, dtype=np.float64)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
h0[i] = len(ix) / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
cumhaz = np.cumsum(h0) - h0
current_strata_surv = np.exp(-cumhaz)
rslt.append([uft, cumhaz, current_strata_surv])
return rslt
def baseline_cumulative_hazard_function(self, params):
"""
Returns a function that calculates the baseline cumulative
hazard function for each stratum.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A dict mapping stratum names to the estimated baseline
cumulative hazard function.
"""
from scipy.interpolate import interp1d
surv = self.surv
base = self.baseline_cumulative_hazard(params)
cumhaz_f = {}
for stx in range(surv.nstrat):
time_h = base[stx][0]
cumhaz = base[stx][1]
time_h = np.r_[-np.inf, time_h, np.inf]
cumhaz = np.r_[cumhaz[0], cumhaz, cumhaz[-1]]
func = interp1d(time_h, cumhaz, kind='zero')
cumhaz_f[self.surv.stratum_names[stx]] = func
return cumhaz_f
def predict(self, params, exog=None, cov_params=None, endog=None,
strata=None, offset=None, pred_type="lhr"):
# docstring attached below
pred_type = pred_type.lower()
if pred_type not in ["lhr", "hr", "surv", "cumhaz"]:
msg = "Type %s not allowed for prediction" % pred_type
raise ValueError(msg)
class bunch:
predicted_values = None
standard_errors = None
ret_val = bunch()
# Don't do anything with offset here because we want to allow
# different offsets to be specified even if exog is the model
# exog.
exog_provided = True
if exog is None:
exog = self.exog
exog_provided = False
lhr = np.dot(exog, params)
if offset is not None:
lhr += offset
# Never use self.offset unless we are also using self.exog
elif self.offset is not None and not exog_provided:
lhr += self.offset
# Handle lhr and hr prediction first, since they don't make
# use of the hazard function.
if pred_type == "lhr":
ret_val.predicted_values = lhr
if cov_params is not None:
mat = np.dot(exog, cov_params)
va = (mat * exog).sum(1)
ret_val.standard_errors = np.sqrt(va)
return ret_val
hr = np.exp(lhr)
if pred_type == "hr":
ret_val.predicted_values = hr
return ret_val
# Makes sure endog is defined
if endog is None and exog_provided:
msg = "If `exog` is provided `endog` must be provided."
raise ValueError(msg)
# Use model endog if using model exog
elif endog is None and not exog_provided:
endog = self.endog
# Make sure strata is defined
if strata is None:
if exog_provided and self.surv.nstrat > 1:
raise ValueError("`strata` must be provided")
if self.strata is None:
strata = [self.surv.stratum_names[0],] * len(endog)
else:
strata = self.strata
cumhaz = np.nan * np.ones(len(endog), dtype=np.float64)
stv = np.unique(strata)
bhaz = self.baseline_cumulative_hazard_function(params)
for stx in stv:
ix = np.flatnonzero(strata == stx)
func = bhaz[stx]
cumhaz[ix] = func(endog[ix]) * hr[ix]
if pred_type == "cumhaz":
ret_val.predicted_values = cumhaz
elif pred_type == "surv":
ret_val.predicted_values = np.exp(-cumhaz)
return ret_val
predict.__doc__ = _predict_docstring % {'cov_params_doc': _predict_cov_params_docstring}
def get_distribution(self, params):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Parameters
----------
        params : array-like
The model proportional hazards model parameters.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
        failure times within a stratum.
"""
# TODO: this returns a Python list of rv_discrete objects, so
# nothing can be vectorized. It appears that rv_discrete does
# not allow vectorization.
from scipy.stats.distributions import rv_discrete
surv = self.surv
bhaz = self.baseline_cumulative_hazard(params)
# The arguments to rv_discrete_float, first obtained by
# stratum
pk, xk = [], []
for stx in range(self.surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
# The unique failure times for this stratum (the support
# of the distribution).
pts = bhaz[stx][0]
# The individual cumulative hazards for everyone in this
# stratum.
ichaz = np.outer(e_linpred, bhaz[stx][1])
# The individual survival functions.
usurv = np.exp(-ichaz)
usurv = np.concatenate((usurv, np.zeros((usurv.shape[0], 1))),
axis=1)
# The individual survival probability masses.
probs = -np.diff(usurv, 1)
pk.append(probs)
xk.append(np.outer(np.ones(probs.shape[0]), pts))
# Pad to make all strata have the same shape
mxc = max([x.shape[1] for x in xk])
for k in range(self.surv.nstrat):
if xk[k].shape[1] < mxc:
                # Pad this stratum's support/probability arrays out to the
                # common width `mxc`, leaving zeros in the unused columns.
                xk1 = np.zeros((xk[k].shape[0], mxc))
                pk1 = np.zeros((pk[k].shape[0], mxc))
                xk1[:, 0:xk[k].shape[1]] = xk[k]
                pk1[:, 0:pk[k].shape[1]] = pk[k]
xk[k], pk[k] = xk1, pk1
xka = np.nan * np.zeros((len(self.endog), mxc), dtype=np.float64)
pka = np.ones((len(self.endog), mxc), dtype=np.float64) / mxc
for stx in range(self.surv.nstrat):
ix = self.surv.stratum_rows[stx]
xka[ix, :] = xk[stx]
pka[ix, :] = pk[stx]
dist = rv_discrete_float(xka, pka)
return dist
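# ---------------------------------------------------------------------------
# Minimal end-to-end sketch for PHReg on simulated right-censored data.  The
# sample size, coefficients and helper name `_phreg_fit_demo` are arbitrary
# choices for illustration.
def _phreg_fit_demo():
    import numpy as np
    np.random.seed(0)
    n = 200
    exog = np.random.normal(size=(n, 2))
    lin_pred = np.dot(exog, np.r_[1.0, -0.5])
    # Exponential event times with rate exp(lin_pred), independent censoring.
    event_time = -np.log(np.random.uniform(size=n)) / np.exp(lin_pred)
    censor_time = -np.log(np.random.uniform(size=n))
    time = np.where(event_time <= censor_time, event_time, censor_time)
    status = (event_time <= censor_time).astype(np.float64)
    result = PHReg(time, exog, status=status, ties="breslow").fit()
    # The estimated log hazard ratios should be close to (1.0, -0.5).
    print(result.params)
    print(result.standard_errors)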
class PHRegResults(base.LikelihoodModelResults):
'''
Class to contain results of fitting a Cox proportional hazards
survival model.
    PHRegResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
model : class instance
        PHReg model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
params : array
The coefficients of the fitted model. Each coefficient is the
log hazard ratio corresponding to a 1 unit difference in a
single covariate while holding the other covariates fixed.
bse : array
The standard errors of the fitted parameters.
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params, covariance_type="naive"):
self.covariance_type = covariance_type
super(PHRegResults, self).__init__(model, params,
normalized_cov_params=cov_params)
@cache_readonly
def standard_errors(self):
"""
Returns the standard errors of the parameter estimates.
"""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def bse(self):
"""
Returns the standard errors of the parameter estimates.
"""
return self.standard_errors
def get_distribution(self):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
        failure times within a stratum.
"""
return self.model.get_distribution(self.params)
def predict(self, endog=None, exog=None, strata=None,
offset=None, transform=True, pred_type="lhr"):
# docstring attached below
return super(PHRegResults, self).predict(exog=exog,
transform=transform,
cov_params=self.cov_params(),
endog=endog,
strata=strata,
offset=offset,
pred_type=pred_type)
predict.__doc__ = _predict_docstring % {'cov_params_doc': ''}
def _group_stats(self, groups):
"""
Descriptive statistics of the groups.
"""
gsize = {}
for x in groups:
if x not in gsize:
gsize[x] = 0
gsize[x] += 1
        gsize = np.asarray(list(gsize.values()))
return gsize.min(), gsize.max(), gsize.mean()
@cache_readonly
def weighted_covariate_averages(self):
"""
The average covariate values within the at-risk set at each
event time point, weighted by hazard.
"""
return self.model.weighted_covariate_averages(self.params)
@cache_readonly
def score_residuals(self):
"""
A matrix containing the score residuals.
"""
return self.model.score_residuals(self.params)
@cache_readonly
def baseline_cumulative_hazard(self):
"""
A list (corresponding to the strata) containing the baseline
cumulative hazard function evaluated at the event points.
"""
return self.model.baseline_cumulative_hazard(self.params)
@cache_readonly
def baseline_cumulative_hazard_function(self):
"""
A list (corresponding to the strata) containing function
objects that calculate the cumulative hazard function.
"""
return self.model.baseline_cumulative_hazard_function(self.params)
@cache_readonly
def schoenfeld_residuals(self):
"""
A matrix containing the Schoenfeld residuals.
Notes
-----
        Schoenfeld residuals for censored observations are set to NaN.
"""
surv = self.model.surv
w_avg = self.weighted_covariate_averages
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
sch_resid = np.nan*np.ones(self.model.exog.shape, dtype=np.float64)
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
strat_ix = surv.stratum_rows[stx]
ii = np.searchsorted(uft, time_s)
# These subjects are censored after the last event in
# their stratum, so have empty risk sets and undefined
# residuals.
jj = np.flatnonzero(ii < len(uft))
sch_resid[strat_ix[jj], :] = exog_s[jj, :] - w_avg[stx][ii[jj], :]
jj = np.flatnonzero(self.model.status == 0)
sch_resid[jj, :] = np.nan
return sch_resid
@cache_readonly
def martingale_residuals(self):
"""
The martingale residuals.
"""
surv = self.model.surv
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
mart_resid = np.nan*np.ones(len(self.model.endog), dtype=np.float64)
cumhaz_f_list = self.baseline_cumulative_hazard_function
# Loop over strata
for stx in range(surv.nstrat):
cumhaz_f = cumhaz_f_list[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
linpred = np.dot(exog_s, self.params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
ii = surv.stratum_rows[stx]
chaz = cumhaz_f(time_s)
mart_resid[ii] = self.model.status[ii] - e_linpred * chaz
return mart_resid
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the proportional hazards regression results.
Parameters
        ----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `x#` for # in 0, ..., p - 1, where p is the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
from statsmodels.compat.collections import OrderedDict
smry = summary2.Summary()
float_format = "%8.3f"
info = OrderedDict()
info["Model:"] = "PH Reg"
if yname is None:
yname = self.model.endog_names
info["Dependent variable:"] = yname
info["Ties:"] = self.model.ties.capitalize()
info["Sample size:"] = str(self.model.surv.n_obs)
info["Num. events:"] = str(int(sum(self.model.status)))
if self.model.groups is not None:
mn, mx, avg = self._group_stats(self.model.groups)
info["Max. group size:"] = str(mx)
info["Min. group size:"] = str(mn)
info["Avg. group size:"] = str(avg)
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param = param.rename(columns={"Coef.": "log HR",
"Std.Err.": "log HR SE"})
param.insert(2, "HR", np.exp(param["log HR"]))
a = "[%.3f" % (alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
a = "%.3f]" % (1 - alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
        if xname is not None:
param.index = xname
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
smry.add_text("Confidence intervals are for the hazard ratios")
if self.model.groups is not None:
smry.add_text("Standard errors account for dependence within groups")
if hasattr(self, "regularized"):
smry.add_text("Standard errors do not account for the regularization")
return smry
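# ---------------------------------------------------------------------------
# Minimal sketch of the four prediction types exposed through
# PHRegResults.predict; the simulated data and the helper name
# `_phreg_predict_demo` are illustrative only.
def _phreg_predict_demo():
    import numpy as np
    np.random.seed(1)
    exog = np.random.normal(size=(100, 2))
    time = -np.log(np.random.uniform(size=100)) / np.exp(np.dot(exog, [1., -1.]))
    result = PHReg(time, exog).fit()
    # Log hazard ratios and hazard ratios for the model's own exog.
    lhr = result.predict(pred_type="lhr").predicted_values
    hr = result.predict(pred_type="hr").predicted_values
    # Cumulative hazard and survival are evaluated at the model endog
    # (event/censoring times) when no exog is supplied.
    cumhaz = result.predict(pred_type="cumhaz").predicted_values
    surv_prob = result.predict(pred_type="surv").predicted_values
    print(lhr[:3], hr[:3], cumhaz[:3], surv_prob[:3])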
class rv_discrete_float(object):
"""
A class representing a collection of discrete distributions.
Parameters
----------
xk : 2d array-like
The support points, should be non-decreasing within each
row.
pk : 2d array-like
The probabilities, should sum to one within each row.
Notes
-----
Each row of `xk`, and the corresponding row of `pk` describe a
discrete distribution.
`xk` and `pk` should both be two-dimensional ndarrays. Each row
of `pk` should sum to 1.
This class is used as a substitute for scipy.distributions.
rv_discrete, since that class does not allow non-integer support
points, or vectorized operations.
Only a limited number of methods are implemented here compared to
the other scipy distribution classes.
"""
def __init__(self, xk, pk):
self.xk = xk
self.pk = pk
self.cpk = np.cumsum(self.pk, axis=1)
def rvs(self):
"""
Returns a random sample from the discrete distribution.
A vector is returned containing a single draw from each row of
`xk`, using the probabilities of the corresponding row of `pk`
"""
n = self.xk.shape[0]
u = np.random.uniform(size=n)
ix = (self.cpk < u[:, None]).sum(1)
ii = np.arange(n, dtype=np.int32)
return self.xk[(ii,ix)]
def mean(self):
"""
Returns a vector containing the mean values of the discrete
distributions.
A vector is returned containing the mean value of each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
return (self.xk * self.pk).sum(1)
def var(self):
"""
Returns a vector containing the variances of the discrete
distributions.
A vector is returned containing the variance for each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
mn = self.mean()
xkc = self.xk - mn[:, None]
        # Variance uses the centered support values in each row.
        return (self.pk * xkc**2).sum(1)
def std(self):
"""
Returns a vector containing the standard deviations of the
discrete distributions.
A vector is returned containing the standard deviation for
each row of `xk`, using the probabilities in the corresponding
row of `pk`.
"""
return np.sqrt(self.var())
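# ---------------------------------------------------------------------------
# Minimal sketch of rv_discrete_float: each row of `xk`/`pk` is one discrete
# distribution.  The numbers below are arbitrary.
def _rv_discrete_float_demo():
    import numpy as np
    xk = np.array([[1., 2., 4.],
                   [0., 5., 10.]])
    pk = np.array([[0.2, 0.5, 0.3],
                   [0.6, 0.3, 0.1]])
    dist = rv_discrete_float(xk, pk)
    print(dist.mean())   # row-wise means: [2.4, 2.5]
    print(dist.var())    # row-wise variances
    print(dist.rvs())    # one random draw per row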
def _opt_1d(funcs, start, L1_wt, tol):
"""
Optimize a L1-penalized smooth one-dimensional function of a
single variable.
Parameters
----------
funcs : tuple of functions
funcs[0] is the objective function to be minimized. funcs[1]
and funcs[2] are, respectively, the first and second
derivatives of the smooth part of funcs[0] (i.e. excluding the
L1 penalty).
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
Returns
-------
The argmin of the objective function.
"""
# TODO: can we detect failures without calling funcs[0] twice?
x = start
f = funcs[0](x)
b = funcs[1](x)
c = funcs[2](x)
d = b - c*x
if L1_wt > np.abs(d):
return 0.
elif d >= 0:
x += (L1_wt - b) / c
elif d < 0:
x -= (L1_wt + b) / c
f1 = funcs[0](x)
# This is an expensive fall-back if the quadratic
# approximation is poor and sends us far off-course.
if f1 > f + 1e-10:
return brent(funcs[0], brack=(x-0.2, x+0.2), tol=tol)
return x
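# ---------------------------------------------------------------------------
# Worked sketch of the quadratic-approximation step in _opt_1d.  For the
# smooth part f(x) = (x - 2)**2 / 2 with L1 weight w, the penalized minimizer
# is the soft-thresholded value max(2 - w, 0); the demo below (an
# illustration, not part of the module's public interface) recovers it.
def _opt_1d_demo(w=0.5):
    smooth = lambda x: 0.5 * (x - 2.0) ** 2
    objective = lambda x: smooth(x) + w * abs(x)
    grad = lambda x: x - 2.0    # first derivative of the smooth part
    hess = lambda x: 1.0        # second derivative of the smooth part
    xmin = _opt_1d((objective, grad, hess), start=0.0, L1_wt=w, tol=1e-8)
    print(xmin)  # 1.5 for w = 0.5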
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | neuralnilm/plot.py | 2 | 15323 | from __future__ import division, print_function
import matplotlib
matplotlib.rcParams.update({'font.size': 8})
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
import h5py
from scipy.stats import norm
def plot_activations(filename, epoch, seq_i=0, normalise=False):
f = h5py.File(filename, mode='r')
epoch_name = 'epoch{:06d}'.format(epoch)
epoch_group = f[epoch_name]
for layer_name, layer_activations in epoch_group.iteritems():
print(layer_activations[seq_i, :, :].transpose().shape)
activations = layer_activations[seq_i, :, :]
if normalise:
activations /= activations.max(axis=0)
plt.imshow(
activations.transpose(), aspect='auto', interpolation='none')
break
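# ---------------------------------------------------------------------------
# Minimal sketch of the HDF5 layout plot_activations expects: one group per
# epoch, each holding a (n_seq, n_timesteps, n_units) array per layer.  The
# file name, group name and layer name below are made up for illustration.
def _plot_activations_demo(filename='activations_demo.hdf5'):
    import numpy as np
    import h5py
    with h5py.File(filename, mode='w') as f:
        grp = f.create_group('epoch000005')
        grp.create_dataset('layer1', data=np.random.rand(4, 64, 8))
    plot_activations(filename, epoch=5, seq_i=0, normalise=True)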
class Plotter(object):
def __init__(self, n_seq_to_plot=10, n_training_examples_to_plot=4,
net=None):
self.n_seq_to_plot = n_seq_to_plot
self.linewidth = 0.2
self.save = True
self.seq_i = 0
self.plot_additional_seqs = 0
self.net = net
self.ylim = None # Set by the user while code is running.
self.n_training_examples_to_plot = n_training_examples_to_plot
@property
def target_labels(self):
return self.net.source.get_labels() if self.net is not None else []
def plot_all(self):
self.plot_costs()
self.plot_estimates()
def plot_costs(self):
fig, ax = plt.subplots(1)
n_iterations = len(self.net.training_costs)
SIZE = 2
# Check for source_i metadata
if (self.net.training_costs_metadata and
'source_i' in self.net.training_costs_metadata[0]):
source_i_list = [
int(metadata['source_i'])
for metadata in self.net.training_costs_metadata]
TRAIN_COLOR_MAP = {0: 'r', 1: 'b'}
train_color = [
TRAIN_COLOR_MAP[source_i] for source_i in source_i_list]
else:
train_color = 'b'
# Plot training costs
train_x = np.arange(0, n_iterations)
ax.scatter(train_x, self.net.training_costs, label='Training',
c=train_color, alpha=0.2, s=SIZE, linewidths=0)
# Plot validation costs
validation_x = np.arange(0, n_iterations, self.net.validation_interval)
n_validations = min(len(validation_x), len(self.net.validation_costs))
ax.scatter(validation_x[:n_validations],
self.net.validation_costs[:n_validations],
label='Validation', c='g', s=SIZE, linewidths=0)
# Text and formatting
ax.set_xlim((0, n_iterations))
if self.ylim is None:
train_start_i = 100 if len(self.net.training_costs) > 1000 else 0
valid_start_i = 100 if len(self.net.validation_costs) > 1000 else 0
max_cost = max(max(self.net.training_costs[train_start_i:]),
max(self.net.validation_costs[valid_start_i:]))
min_cost = min(min(self.net.training_costs),
min(self.net.validation_costs))
ax.set_ylim((min_cost, max_cost))
else:
ax.set_ylim(self.ylim)
ax.set_xlabel('Iteration')
ax.set_ylabel('Cost')
ax.legend()
ax.grid(True)
self._save_or_display_fig(
'costs', fig, include_epochs=False, suffix='png', dpi=300)
return ax
def plot_estimates(self):
validation_batch = self.net.validation_batch
X, y = validation_batch.data
output = self.net.y_pred(X)
X, y, output = self._process(X, y, output)
sequences = range(min(self.net.n_seq_per_batch, self.n_seq_to_plot))
for seq_i in sequences:
self.seq_i = seq_i
fig, axes = self.create_estimates_fig(
X, y, output, validation_batch.target_power_timeseries,
metadata=validation_batch.metadata)
# Training examples
for batch_i in range(self.n_training_examples_to_plot):
self.seq_i = 0
train_batch = self.net.source.get()
X, y = train_batch.data
output = self.net.y_pred(X)
X, y, output = self._process(X, y, output)
fig, axes = self.create_estimates_fig(
X, y, output, train_batch.target_power_timeseries,
filename_string='train_estimates',
metadata=train_batch.metadata,
end_string=batch_i)
def _process(self, X, y, output):
return X, y, output
def create_estimates_fig(self, X, y, output, target_power_timeseries,
filename_string='estimates', metadata=None,
end_string=None):
fig, axes = plt.subplots(3)
self._plot_network_output(axes[0], output)
self._plot_target(axes[1], y, target_power_timeseries)
if metadata:
fig.text(
x=0.1, y=0.5, s=str(dict(metadata)), fontsize=6,
horizontalalignment='center', verticalalignment='center')
self._plot_input(axes[2], X)
for ax in axes:
ax.grid(True)
end_string = self.seq_i if end_string is None else end_string
self._save_or_display_fig(filename_string, fig, end_string=end_string)
return fig, axes
def _save_or_display_fig(self, string, fig, dpi=None,
include_epochs=True, end_string="", suffix="pdf"):
fig.tight_layout()
if not self.save:
plt.show(block=True)
return
end_string = str(end_string)
filename = (
self.net.experiment_name +
("_" if self.net.experiment_name else "") +
string +
("_{:d}epochs".format(self.net.n_iterations())
if include_epochs else "") +
("_" if end_string else "") + end_string +
"." + suffix)
plt.savefig(filename, bbox_inches='tight', dpi=dpi)
plt.close(fig)
def _plot_network_output(self, ax, output):
ax.set_title('Network output')
ax.plot(output[self.seq_i, :, :], linewidth=self.linewidth)
n = len(output[self.seq_i, :, :])
ax.set_xlim([0, n])
def _plot_target(self, ax, y, target_power_timeseries):
ax.set_title('Target')
ax.plot(y[self.seq_i, :, :], linewidth=self.linewidth)
# alpha: lower = more transparent
ax.legend(self.target_labels, fancybox=True,
framealpha=0.5, prop={'size': 6})
n = len(y[self.seq_i, :, :])
ax.set_xlim([0, n])
def _plot_input(self, ax, X):
ax.set_title('Network input')
CHANNEL = 0
if self.net is None:
data = X[self.seq_i, :, CHANNEL]
elif hasattr(self.net.source, 'inside_padding'):
start, end = self.net.source.inside_padding()
data = X[self.seq_i, start:end, CHANNEL]
else:
data = X[self.seq_i, :, CHANNEL]
ax.plot(data, linewidth=self.linewidth)
ax.set_xlim([0, data.shape[0]])
class MDNPlotter(Plotter):
def __init__(self, net=None, seq_length=None):
super(MDNPlotter, self).__init__(net)
self.seq_length = (self.net.source.output_shape()[1]
if seq_length is None else seq_length)
def create_estimates_fig(self, X, y, output):
n_outputs = output.shape[2]
fig, axes = plt.subplots(2 + n_outputs, figsize=(8, 11))
self._plot_input(axes[0], X)
self._plot_target(axes[1], y)
for output_i in range(n_outputs):
ax = axes[2 + output_i]
self._plot_network_output(ax, output_i, output, y)
for ax in axes:
ax.grid(False)
self._save_or_display_fig('estimates', fig, end_string=self.seq_i)
return fig, axes
def _plot_network_output(self, ax, output_i, output, target):
title = 'Network output density'
if self.target_labels:
title += ' for {}'.format(self.target_labels[output_i])
ax.set_title(title)
output = output[self.seq_i, :, output_i, :, :]
target = target[self.seq_i, :, output_i]
mu = output[:, :, 0]
sigma = output[:, :, 1]
mixing = output[:, :, 2]
y_extra = max(target.ptp() * 0.2, mu.ptp() * 0.2)
y_lim = (min(target.min(), mu.min()) - y_extra,
max(target.max(), mu.max()) + y_extra)
x_lim = (0, self.seq_length)
gmm_heatmap(ax, (mu, sigma, mixing), x_lim, y_lim)
# plot means
n_components = mu.shape[-1]
for component_i in range(n_components):
ax.plot(mu[:, component_i], color='red', linewidth=0.5, alpha=0.5)
# plot target
ax.plot(target, color='green', linewidth=0.5, alpha=0.5)
# set limits
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
def _process(self, X, y, output, target_shape=None):
if target_shape is None:
target_shape = self.net.source.output_shape()
y_reshaped = y.reshape(target_shape)
output_reshaped = output.reshape(target_shape + output.shape[2:])
return X, y_reshaped, output_reshaped
class CentralOutputPlotter(Plotter):
def _plot_network_output(self, ax, output):
ax.set_title('Network output')
n_outputs = output.shape[2]
ax.bar(range(n_outputs), output[self.seq_i, 0, :])
def _plot_target(self, ax, y, target_power_timeseries):
ax.set_title('Target')
n_outputs = y.shape[2]
ax.bar(range(n_outputs), y[self.seq_i, 0, :])
ax.set_xticklabels(self.target_labels)
class RectangularOutputPlotter(Plotter):
def __init__(self, *args, **kwargs):
self.cumsum = kwargs.pop('cumsum', False)
super(RectangularOutputPlotter, self).__init__(*args, **kwargs)
def _plot_network_output(self, ax, output):
self._plot_scatter(ax, output, 'Network output')
def _plot_target(self, ax, y, target_power_timeseries):
self._plot_scatter(ax, y, 'Target')
def _plot_scatter(self, ax, data, title):
example = data[self.seq_i, :, 0]
if self.cumsum:
example = np.cumsum(example)
y_values = [0] * len(example)
ax.scatter(example, y_values)
ax.set_xlim((0, 1))
ax.set_title(title)
class StartEndMeanPlotter(Plotter):
def __init__(self, *args, **kwargs):
self.max_target_power = kwargs.pop('max_target_power', 100)
super(StartEndMeanPlotter, self).__init__(*args, **kwargs)
def _plot_target(self, ax, y, target_power_timeseries):
# Plot time series.
seq_length, n_outputs = target_power_timeseries.shape[1:3]
colors = get_colors(n_outputs)
for output_i in range(n_outputs):
ax.plot(target_power_timeseries[self.seq_i, :, output_i],
linewidth=self.linewidth,
c=colors[output_i],
label=self.target_labels[output_i])
# Legend: lower alpha = more transparent.
ax.legend(fancybox=True, framealpha=0.5, prop={'size': 6})
ax.set_title('Target')
# Rectangles.
plot_rectangles(ax, y, seq_i=self.seq_i, plot_seq_width=seq_length)
ax.set_xlim((0, seq_length))
def _plot_network_output(self, ax, output):
plot_rectangles(ax, output, self.seq_i)
ax.set_xlim((0, 1))
ax.set_title('Network output')
def plot_rectangles(ax, batch, seq_i=0, plot_seq_width=1, offset=0, alpha=0.5):
"""
Parameters
----------
ax : matplotlib axes
batch : numpy.ndarray
Shape = (n_seq_per_batch, 3, n_outputs)
seq_i : int, optional
Index into the first dimension of `batch`.
plot_seq_width : int or float, optional
The width of a sequence plotted on the X-axis.
Multiply `left` and `right` values by `plot_seq_width` before plotting.
offset : float, optional
Shift rectangles left or right by `offset` where one complete sequence
is of length `plot_seq_width`. i.e. to move rectangles half a plot
width right, set `offset` to `plot_seq_width / 2.0`.
alpha : float, optional
[0, 1]. Transparency for the rectangles.
"""
# sanity check
for obj in [seq_i, plot_seq_width, offset, alpha]:
if not isinstance(obj, (int, float)):
raise ValueError("Incorrect input: {}".format(obj))
n_outputs = batch.shape[2]
colors = get_colors(n_outputs)
for output_i in range(n_outputs):
single_output = batch[seq_i, :, output_i]
left = (single_output[0] * plot_seq_width) + offset
height = single_output[2]
width = (single_output[1] - single_output[0]) * plot_seq_width
color = colors[output_i]
ax.bar(left, height, width, alpha=alpha, color=color, edgecolor=color)
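# ---------------------------------------------------------------------------
# Minimal sketch of plot_rectangles: one sequence, two appliances, with
# made-up (start, end, height) triples on the unit interval.
def _plot_rectangles_demo():
    import numpy as np
    import matplotlib.pyplot as plt
    # shape = (n_seq_per_batch=1, 3, n_outputs=2); rows are start, end, height
    batch = np.array([[[0.1, 0.6],
                       [0.4, 0.9],
                       [0.8, 0.3]]])
    fig, ax = plt.subplots()
    plot_rectangles(ax, batch, seq_i=0, plot_seq_width=100)
    ax.set_xlim((0, 100))
    fig.savefig('rectangles_demo.png')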
def plot_disaggregate_start_stop_end(rectangles, ax=None, alpha=0.5):
"""
Parameters
----------
rectangles : dict
output from neuralnilm.disaggregate.disaggregate_start_stop_end
ax : matplotlib.axes.Axes, optional
alpha : float, [0, 1]
Returns
-------
ax
"""
if ax is None:
ax = plt.gca()
n_outputs = len(rectangles.keys())
colors = get_colors(n_outputs)
for output_i, rects in rectangles.iteritems():
color = colors[output_i]
for rectangle in rects:
width = rectangle.right - rectangle.left
ax.bar(rectangle.left, rectangle.height, width,
alpha=alpha, color=color, edgecolor=color)
return ax
def plot_rectangles_matrix(matrix):
import matplotlib.pyplot as plt
plt.imshow(matrix, aspect='auto', interpolation='none', origin='lower')
plt.show()
def get_colors(n):
return [c for c in cm.rainbow(np.linspace(0, 1, n))]
def gmm_pdf(theta, x):
"""
Parameters
----------
theta : tuple of (mu, sigma, mixing)
"""
pdf = None
for mu, sigma, mixing in zip(*theta):
norm_pdf = norm.pdf(x=x, loc=mu, scale=sigma)
norm_pdf *= mixing
if pdf is None:
pdf = norm_pdf
else:
pdf += norm_pdf
return pdf
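# ---------------------------------------------------------------------------
# Minimal sketch of gmm_pdf: a two-component mixture evaluated on a grid.
# The component parameters are arbitrary illustration values.
def _gmm_pdf_demo():
    import numpy as np
    mus = np.array([0.0, 3.0])
    sigmas = np.array([1.0, 0.5])
    mixings = np.array([0.7, 0.3])
    x = np.linspace(-3, 5, 9)
    # Mixing-weighted sum of the component densities at each grid point.
    print(gmm_pdf((mus, sigmas, mixings), x))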
def gmm_heatmap(ax, thetas, x_lim, y_lim, normalise=False,
cmap=matplotlib.cm.Blues):
"""
Parameters
----------
thetas : tuple of (array of mus, array of sigmas, array of mixing)
y_lim, x_lim : each is a 2-tuple of numbers
"""
N_X = 200
n_y = len(thetas[0])
x_lim = (x_lim[0] - 0.5, x_lim[1] - 0.5)
extent = x_lim + y_lim # left, right, bottom, top
x = np.linspace(y_lim[0], y_lim[1], N_X)
img = np.zeros(shape=(N_X, n_y))
i = 0
for i, (mu, sigma, mixing) in enumerate(zip(*thetas)):
img[:, i] = gmm_pdf((mu, sigma, mixing), x)
if normalise:
img[:, i] /= np.max(img[:, i])
ax.imshow(img, interpolation='none', extent=extent, aspect='auto',
origin='lower', cmap=cmap)
return ax
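# ---------------------------------------------------------------------------
# Minimal sketch of gmm_heatmap for a toy sequence whose two mixture
# components drift apart over 50 time steps; all values are illustrative.
def _gmm_heatmap_demo():
    import numpy as np
    import matplotlib.pyplot as plt
    n_steps = 50
    t = np.linspace(0, 1, n_steps)
    mu = np.column_stack([t, 1 - t])            # shape (n_steps, 2)
    sigma = np.full((n_steps, 2), 0.05)
    mixing = np.full((n_steps, 2), 0.5)
    fig, ax = plt.subplots()
    gmm_heatmap(ax, (mu, sigma, mixing), x_lim=(0, n_steps), y_lim=(0, 1))
    fig.savefig('gmm_heatmap_demo.png')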
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/neuralnilm_prototype/plot.py /mnt/sshfs/imperial/workspace/python/neuralnilm/neuralnilm_prototype/"
End:
"""
| mit |
jwiggins/scikit-image | skimage/viewer/plugins/overlayplugin.py | 40 | 3615 | from warnings import warn
from ...util.dtype import dtype_range
from .base import Plugin
from ..utils import ClearColormap, update_axes_image
import six
from ..._shared.version_requirements import is_installed
__all__ = ['OverlayPlugin']
class OverlayPlugin(Plugin):
"""Plugin for ImageViewer that displays an overlay on top of main image.
The base Plugin class displays the filtered image directly on the viewer.
OverlayPlugin will instead overlay an image with a transparent colormap.
See base Plugin class for additional details.
Attributes
----------
overlay : array
Overlay displayed on top of image. This overlay defaults to a color map
with alpha values varying linearly from 0 to 1.
color : int
Color of overlay.
"""
colors = {'red': (1, 0, 0),
'yellow': (1, 1, 0),
'green': (0, 1, 0),
'cyan': (0, 1, 1)}
def __init__(self, **kwargs):
if not is_installed('matplotlib', '>=1.2'):
msg = "Matplotlib >= 1.2 required for OverlayPlugin."
warn(RuntimeWarning(msg))
super(OverlayPlugin, self).__init__(**kwargs)
self._overlay_plot = None
self._overlay = None
self.cmap = None
self.color_names = sorted(list(self.colors.keys()))
def attach(self, image_viewer):
super(OverlayPlugin, self).attach(image_viewer)
#TODO: `color` doesn't update GUI widget when set manually.
self.color = 0
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
ax = self.image_viewer.ax
if image is None:
ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
vmin, vmax = dtype_range[image.dtype.type]
self._overlay_plot = ax.imshow(image, cmap=self.cmap,
vmin=vmin, vmax=vmax)
else:
update_axes_image(self._overlay_plot, image)
if self.image_viewer.useblit:
self.image_viewer._blit_manager.background = None
self.image_viewer.redraw()
@property
def color(self):
return self._color
@color.setter
def color(self, index):
# Update colormap whenever color is changed.
if isinstance(index, six.string_types) and \
index not in self.color_names:
raise ValueError("%s not defined in OverlayPlugin.colors" % index)
else:
name = self.color_names[index]
self._color = name
rgb = self.colors[name]
self.cmap = ClearColormap(rgb)
if self._overlay_plot is not None:
self._overlay_plot.set_cmap(self.cmap)
self.image_viewer.redraw()
@property
def filtered_image(self):
"""Return filtered image.
This "filtered image" is used when saving from the plugin.
"""
return self.overlay
def display_filtered_image(self, image):
"""Display filtered image as an overlay on top of image in viewer."""
self.overlay = image
def closeEvent(self, event):
# clear overlay from ImageViewer on close
self.overlay = None
super(OverlayPlugin, self).closeEvent(event)
def output(self):
"""Return the overlaid image.
Returns
-------
overlay : array, same shape as image
The overlay currently displayed.
data : None
"""
return (self.overlay, None)
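# Usage sketch (illustrative only, not part of the original module; assumes the
# standard skimage viewer workflow and a filter that returns an overlay image):
#     from skimage import data, filters
#     from skimage.viewer import ImageViewer
#     viewer = ImageViewer(data.camera())
#     viewer += OverlayPlugin(image_filter=filters.sobel)
#     viewer.show()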
| bsd-3-clause |
vigilv/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
mayblue9/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
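# Illustrative check (not part of the original example): the hyperparameters
# learned by maximising the marginal log-likelihood can be inspected after
# fitting; attribute names assume scikit-learn's BayesianRidge API.
print("Estimated alpha_ (noise precision): %.3f" % clf.alpha_)
print("Estimated lambda_ (weights precision): %.3f" % clf.lambda_)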
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/examples/event_handling/viewlims.py | 6 | 2880 | # Creates two identical panels. Zooming in on the right panel will show
# a rectangle in the first panel, denoting the zoomed region.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
def __call__(self, ax):
self.set_bounds(*ax.viewLim.bounds)
ax.figure.canvas.draw_idle()
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandlebrotDisplay(object):
def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
self.height = h
self.width = w
self.niter = niter
self.radius = radius
self.power = power
def __call__(self, xstart, xend, ystart, yend):
self.x = np.linspace(xstart, xend, self.width)
self.y = np.linspace(ystart, yend, self.height).reshape(-1,1)
c = self.x + 1.0j * self.y
threshold_time = np.zeros((self.height, self.width))
z = np.zeros(threshold_time.shape, dtype=np.complex)
mask = np.ones(threshold_time.shape, dtype=np.bool)
for i in range(self.niter):
z[mask] = z[mask]**self.power + c[mask]
mask = (np.abs(z) < self.radius)
threshold_time += mask
return threshold_time
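    # threshold_time[i, j] counts how many of the `niter` iterations the orbit
    # of pixel (i, j) stayed inside |z| < radius; this escape-time count is
    # what gets rendered as the fractal image.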
def ax_update(self, ax):
ax.set_autoscale_on(False) # Otherwise, infinite loop
#Get the number of points from the number of pixels in the window
dims = ax.axesPatch.get_window_extent().bounds
        self.width = int(dims[2] + 0.5)
        self.height = int(dims[3] + 0.5)  # dims is (x0, y0, width, height)
#Get the range for the new area
xstart,ystart,xdelta,ydelta = ax.viewLim.bounds
xend = xstart + xdelta
yend = ystart + ydelta
# Update the image object with our new data and extent
im = ax.images[-1]
im.set_data(self.__call__(xstart, xend, ystart, yend))
im.set_extent((xstart, xend, ystart, yend))
ax.figure.canvas.draw_idle()
md = MandlebrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig1, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black')
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
plt.show()
| apache-2.0 |
PierreF/influxdb-python | influxdb/tests/influxdb08/dataframe_client_test.py | 8 | 12409 | # -*- coding: utf-8 -*-
"""
unit tests for misc module
"""
from .client_test import _mocked_session
import unittest
import json
import requests_mock
from nose.tools import raises
from datetime import timedelta
from influxdb.tests import skipIfPYpy, using_pypy
import copy
import warnings
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
from influxdb.influxdb08 import DataFrameClient
@skipIfPYpy
class TestDataFrameClient(unittest.TestCase):
def setUp(self):
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
def test_write_points_from_dataframe(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_float_nan(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[[1, float("NaN"), 1.0], [2, 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
[1, None, 1.0, 0],
[2, 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_in_batches(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
self.assertTrue(cli.write_points({"foo": dataframe}, batch_size=1))
def test_write_points_from_dataframe_with_numeric_column_names(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ['0', '1', '2', "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_period_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[pd.Period('1970-01-01'),
pd.Period('1970-01-02')],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 86400]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_time_precision(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
points_ms = copy.deepcopy(points)
points_ms[0]["points"][1][-1] = 3600 * 1000
points_us = copy.deepcopy(points)
points_us[0]["points"][1][-1] = 3600 * 1000000
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe}, time_precision='s')
self.assertListEqual(json.loads(m.last_request.body), points)
cli.write_points({"foo": dataframe}, time_precision='m')
self.assertListEqual(json.loads(m.last_request.body), points_ms)
cli.write_points({"foo": dataframe}, time_precision='u')
self.assertListEqual(json.loads(m.last_request.body), points_us)
@raises(TypeError)
def test_write_points_from_dataframe_fails_without_time_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
@raises(TypeError)
def test_write_points_from_dataframe_fails_with_series(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.Series(data=[1.0, 2.0],
index=[now, now + timedelta(hours=1)])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
def test_query_into_dataframe(self):
data = [
{
"name": "foo",
"columns": ["time", "sequence_number", "column_one"],
"points": [
[3600, 16, 2], [3600, 15, 1],
[0, 14, 2], [0, 13, 1]
]
}
]
# dataframe sorted ascending by time first, then sequence_number
dataframe = pd.DataFrame(data=[[13, 1], [14, 2], [15, 1], [16, 2]],
index=pd.to_datetime([0, 0,
3600, 3600],
unit='s', utc=True),
columns=['sequence_number', 'column_one'])
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
assert_frame_equal(dataframe, result)
def test_query_multiple_time_series(self):
data = [
{
"name": "series1",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, 323048, 323048, 323048, 0]]
},
{
"name": "series2",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -2.8233, -2.8503, -2.7832, 0.0173]]
},
{
"name": "series3",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -0.01220, -0.01220, -0.01220, 0]]
}
]
dataframes = {
'series1': pd.DataFrame(data=[[323048, 323048, 323048, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series2': pd.DataFrame(data=[[-2.8233, -2.8503, -2.7832, 0.0173]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series3': pd.DataFrame(data=[[-0.01220, -0.01220, -0.01220, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev'])
}
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query("""select mean(value), min(value), max(value),
stddev(value) from series1, series2, series3""")
self.assertEqual(dataframes.keys(), result.keys())
for key in dataframes.keys():
assert_frame_equal(dataframes[key], result[key])
def test_query_with_empty_result(self):
with _mocked_session('get', 200, []):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
self.assertEqual(result, [])
def test_list_series(self):
response = [
{
'columns': ['time', 'name'],
'name': 'list_series_result',
'points': [[0, 'seriesA'], [0, 'seriesB']]
}
]
with _mocked_session('get', 200, response):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
series_list = cli.get_list_series()
self.assertEqual(series_list, ['seriesA', 'seriesB'])
def test_datetime_to_epoch(self):
timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
self.assertEqual(
cli._datetime_to_epoch(timestamp),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='s'),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='m'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='ms'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='u'),
1356998400000000.0
)
| mit |
khkaminska/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
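# Minimal usage sketch (assumes the bokeh gapminder sample data has been
# downloaded, e.g. via bokeh.sampledata.download()):
#     fertility, life_expectancy, population_size, regions, years, regions_list = process_data()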
| bsd-3-clause |
alamillac/master_AI_tesis | Code/src/datasetGenerator.py | 1 | 13645 | from pandas import read_csv, DataFrame, Series
from errors import InvalidGroupError, MaxInvalidIterationsError
import numpy as np
import random
import logging
logger = logging.getLogger('datasetGenerator')
logger.setLevel(logging.DEBUG)
class DatasetGenerator(object):
def __init__(self, filenameDataset, seed=None):
logger.debug("Reading file: %s" % filenameDataset)
self.data = read_csv(filenameDataset)
if seed:
random.seed(seed)
def filterDataset(self, num_ratings=20):
logger.debug("Filtering users with more than %d rated movies" % num_ratings)
size_users = self.data.groupby('userId').size()
valid_users = [user_id for user_id in size_users.index if size_users[user_id] >= num_ratings]
ratings = self.data[self.data.userId.isin(valid_users)]
return ratings
def getOptimumDataset(self, best_users=1000, best_movies=1000):
# Get the (best_movies) most rated movies
logger.debug("Filtering %d most rated movies" % best_movies)
size_most_rated_movies = self.data.groupby('movieId').size().sort_values(ascending=False).head(best_movies)
most_rated_movies = size_most_rated_movies.index.values
# Get all the ratings of (best_users) most rated movies
logger.debug("Getting ratings of %d most rated movies" % best_movies)
ratings = self.data[self.data.movieId.isin(most_rated_movies)]
        # Get the (best_users) users who rated the most of the top (best_movies) movies
logger.debug("Getting %d users with more rated movies" % best_users)
size_users_with_more_ratings = ratings.groupby('userId').size().sort_values(ascending=False).head(best_users)
users_with_more_ratings = size_users_with_more_ratings.index.values
# Finally get the final ratings
logger.debug("Getting output ratings")
ratings = self.data[self.data.movieId.isin(most_rated_movies) & self.data.userId.isin(users_with_more_ratings)]
return ratings
def getOptimumDatasetPercentage(self, percentage=0.6):
def n_samples(samples):
return int(len(samples) * percentage)
if percentage <= 0:
return None
if percentage >= 1:
return self.data
num_users = n_samples(self.data.userId.unique())
num_movies = n_samples(self.data.movieId.unique())
return self.getOptimumDataset(best_users=num_users, best_movies=num_movies)
def getDatasetPercentage(self, percentage=1):
def n_samples(samples):
return int(len(samples) * percentage)
if percentage <= 0:
return None
if percentage >= 1:
return self.data
user_ids = self.data.userId.unique()
movie_ids = self.data.movieId.unique()
return self.getDataset(n_samples(user_ids), n_samples(movie_ids))
def getDataset(self, num_users, num_movies=None, more_rated_movies=None):
user_ids = self.data.userId.unique()
sample_user_ids = random.sample(user_ids, num_users)
# get movies rated by sample_users
ratings_of_sample_users = self.data[self.data.userId.isin(sample_user_ids)]
if not num_movies:
return ratings_of_sample_users
movie_ids = ratings_of_sample_users.movieId.unique()
if num_movies >= len(movie_ids):
logger.warning("There is not enough movies. Returning %d movies", len(movie_ids))
return ratings_of_sample_users
if more_rated_movies:
# More rated movies samples
size_most_rated_movies = ratings_of_sample_users.groupby('movieId').size().sort_values(ascending=False).head(num_movies)
sample_movie_ids = size_most_rated_movies.index.values
else:
# Random movie samples
sample_movie_ids = random.sample(movie_ids, num_movies)
ratings = ratings_of_sample_users[ratings_of_sample_users.movieId.isin(sample_movie_ids)]
return ratings
def getCoRatedMovies(self, ratings, users_id):
if len(users_id) == 0:
return set()
co_rated_movies = set(ratings.movieId.unique())
for user_id in users_id:
movies_rated_by_user = set(ratings[ratings.userId.isin([user_id])].movieId.unique())
co_rated_movies.intersection_update(movies_rated_by_user)
return co_rated_movies
def getMostSimilarUsers(self, ratings, user_id, num_users):
"""
        Get the $num_users most similar users to $user_id in the $ratings dataset
"""
logger.debug("Getting %d most similar users for %s", num_users, user_id)
distances = self.getDistances(ratings, user_id)
similar_users = distances.sort_values().head(num_users).index
if len(self.getCoRatedMovies(ratings, similar_users)) < 10:
raise InvalidGroupError()
return list(similar_users)
def getMostDisimilarUsers(self, ratings, user_id, num_users):
"""
        Get the $num_users most dissimilar users to $user_id in the $ratings dataset
"""
logger.debug("Getting %d most disimilar users for %s", num_users, user_id)
distances = self.getDistances(ratings, user_id)
disimilar_users = distances.sort_values(ascending=False).head(num_users).index
if len(self.getCoRatedMovies(ratings, disimilar_users)) < 10:
raise InvalidGroupError()
return list(disimilar_users)
def getRandomUsers(self, ratings, num_users):
logger.debug("Getting %d random users", num_users)
user_ids = ratings.userId.unique()
if len(user_ids) <= num_users:
sample_user_ids = user_ids
else:
sample_user_ids = random.sample(user_ids, num_users)
if len(self.getCoRatedMovies(ratings, sample_user_ids)) < 10:
raise InvalidGroupError()
return list(sample_user_ids)
def getGroupUsersFn(self, ratings, num_groups, size, selectFunction):
remaining_dataset = ratings
num_generated_groups = 0
num_invalid = 0
while num_generated_groups < num_groups:
try:
group = selectFunction(remaining_dataset, size)
num_invalid = 0
num_generated_groups += 1
remaining_dataset = remaining_dataset[~remaining_dataset.userId.isin(group)]
yield group
except InvalidGroupError:
num_invalid += 1
logger.warning("Invalid group iteration %d", num_invalid)
if num_invalid > 20:
raise MaxInvalidIterationsError()
def getGroupUsers(self, ratings, num_groups, size):
def reduceRatings(ratings, user_id):
# Reduce the number of ratings
logger.debug("Reducing ratings for user %s", user_id)
filtered_ratings = self.filterCoRatedMovies(ratings, user_id)
size_filtered_users = filtered_ratings.groupby('userId').size().sort_values(ascending=False).head(10000)
filtered_user_ids = size_filtered_users.index.values
filtered_ratings = filtered_ratings[filtered_ratings.userId.isin(filtered_user_ids)]
logger.debug("Ratings reduced from %d to %d", len(ratings), len(filtered_ratings))
return filtered_ratings
def selectSimilarGroup(ratings, num_users):
user_ids = ratings.userId.unique()
user_id = random.choice(user_ids)
reduced_ratings = reduceRatings(ratings, user_id)
return self.getMostSimilarUsers(reduced_ratings, user_id, num_users)
def selectDisimilarGroup(ratings, num_users):
user_ids = ratings.userId.unique()
user_id = random.choice(user_ids)
reduced_ratings = reduceRatings(ratings, user_id)
return self.getMostDisimilarUsers(reduced_ratings, user_id, num_users)
def selectRandomGroup(ratings, num_users):
user_ids = ratings.userId.unique()
user_id = random.choice(user_ids)
reduced_ratings = reduceRatings(ratings, user_id)
return self.getRandomUsers(reduced_ratings, num_users)
try:
for group in self.getGroupUsersFn(ratings, num_groups, size, selectSimilarGroup):
yield group, 'similar'
except:
logger.warning("Max iteration errors")
try:
for group in self.getGroupUsersFn(ratings, num_groups, size, selectDisimilarGroup):
yield group, 'disimilar'
except:
logger.warning("Max iteration errors")
try:
for group in self.getGroupUsersFn(ratings, num_groups, size, selectRandomGroup):
yield group, 'random'
except:
logger.warning("Max iteration errors")
def filterCoRatedMovies(self, ratings, user_id):
movies_rated_by_user = ratings[ratings.userId.isin([user_id])].movieId.unique()
return ratings[ratings.movieId.isin(movies_rated_by_user)]
def getDistances(self, ratings, user_id):
matrix = self.getMatrix(self.filterCoRatedMovies(ratings, user_id))
logger.debug("Getting distances for user %s", user_id)
distances = []
users = []
num_rated_movies = matrix.loc[user_id].count()
for user_idx in matrix.index:
common_rated_movies = np.count_nonzero(~(matrix.loc[user_idx].isnull() | matrix.loc[user_id].isnull()))
if common_rated_movies > 5: # It should have more than 5 common rated movies to get a valid distance
distance = abs(matrix.loc[user_idx] - matrix.loc[user_id]).sum()
normalized_distance = distance * num_rated_movies / common_rated_movies
else:
normalized_distance = np.nan
# Save distance between user_idx and user_id
distances.append(normalized_distance)
users.append(user_idx)
return Series(distances, index=users)
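    # Worked example of the normalisation above (illustrative numbers): if the
    # reference user rated 20 movies, shares 8 rated movies with user_idx and
    # the raw L1 distance over those 8 is 4.0, the stored distance is
    # 4.0 * 20 / 8 = 10.0; pairs with five or fewer common movies get NaN instead.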
def getMatrix(self, ratings):
logger.debug("Creating matrix data")
userIds = sorted(ratings.userId.unique())
movieIds = sorted(ratings.movieId.unique())
matrix = DataFrame([[np.nan] * len(movieIds) for i in xrange(len(userIds))], columns=movieIds, index=userIds) # initialize matrix with nan values
# Fill matrix
i = 0
rows = ratings.iterrows()
percentage_10 = len(ratings) / 10
percentage_done = 0
for row in rows:
userId = row[1].userId
movieId = row[1].movieId
rating = row[1].rating
matrix.ix[userId, movieId] = rating
if i == percentage_done * percentage_10:
logger.debug("{0:.0f}% done".format(percentage_done * 10))
percentage_done += 1
i += 1
return matrix
def getStatsFromDataset(self, dataset):
num_users = len(dataset.userId.unique())
num_movies = len(dataset.movieId.unique())
count_ratings_by_users = dataset.groupby('userId')['rating'].count()
count_ratings_by_movies = dataset.groupby('movieId')['rating'].count()
return {
"numUsers": num_users,
"numMovies": num_movies,
"numRatings": len(dataset),
"meanRatingsByUsers": count_ratings_by_users.mean(),
"meanRatingsByMovies": count_ratings_by_movies.mean(),
"standardDeviationRatingsByUsers": count_ratings_by_users.std(),
"standardDeviationRatingsByMovies": count_ratings_by_movies.std(),
"countRatingsByUsers": count_ratings_by_users,
"countRatingsByMovies": count_ratings_by_movies,
"histRatingsByUsers": np.histogram(count_ratings_by_users),
"histRatingsByMovies": np.histogram(count_ratings_by_movies)
}
def evaluateConcensusFns(self, ratings, group, concensusFns, n_success=3):
def successN(model1, model2, n):
n_movies1_id = set(model1.sort_values(ascending=False).head(n).index)
n_movies2_id = set(model2.sort_values(ascending=False).head(n).index)
return 1 if len(n_movies1_id.intersection(n_movies2_id)) > 0 else 0
def unsuccessN(model1, model2, n):
n_movies1_id = set(model1.sort_values(ascending=False).head(n).index)
n_movies2_id = set(model2.sort_values(ascending=False).tail(n).index)
return 1 if len(n_movies1_id.intersection(n_movies2_id)) > 0 else 0
def evaluate(concensusFn, group_matrix):
concensus_model = concensusFn(group_matrix)
success_array = []
unsuccess_array = []
for i in range(group_matrix.shape[0]):
user_model = group_matrix.iloc[i]
success_array.append(
successN(concensus_model, user_model, n_success)
)
unsuccess_array.append(
unsuccessN(concensus_model, user_model, n_success)
)
return np.mean(success_array) * 100, np.mean(unsuccess_array) * 100
# Filter ratings to only co-rated movies by all users in group
co_rated_movies = self.getCoRatedMovies(ratings, group)
group_ratings = ratings[ratings.movieId.isin(co_rated_movies) & ratings.userId.isin(group)]
group_matrix = self.getMatrix(group_ratings)
for concensusObj in concensusFns:
evaluation_success, evaluation_unsuccess = evaluate(concensusObj['fn'], group_matrix)
yield concensusObj['name'], evaluation_success, evaluation_unsuccess
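    # Usage sketch (illustrative only; the mean consensus function and file
    # name below are assumptions, not part of this module):
    #     def mean_consensus(group_matrix):
    #         return group_matrix.mean(axis=0)  # average rating per movie
    #     gen = DatasetGenerator('ratings.csv')
    #     ratings = gen.filterDataset()
    #     for group, kind in gen.getGroupUsers(ratings, num_groups=1, size=3):
    #         for name, hit, miss in gen.evaluateConcensusFns(
    #                 ratings, group, [{'name': 'mean', 'fn': mean_consensus}]):
    #             print name, hit, miss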
| mit |
rbharath/deepchem | examples/toxcast/processing/tox.py | 9 | 1668 |
#Processing of ToxCast data
#Author - Aneesh Pappu
import pandas as pd
import numpy as np
#Loading dataframes and editing indices
path_to_casn_smiles = "./casn_to_smiles.csv.gz"
path_to_code_casn = "./code_to_casn.csv.gz"
path_to_hitc_code = "./code_to_hitc.csv.gz"
casn_smiles_df = pd.read_csv(path_to_casn_smiles)
code_casn_df = pd.read_csv(path_to_code_casn)
hitc_code_df = pd.read_csv(path_to_hitc_code)
casn_smiles_df = casn_smiles_df[['Substance_CASRN', 'Structure_SMILES']]
code_casn_df = code_casn_df[['casn', 'code']]
hitc_code_df.rename(columns = {'Unnamed: 0': 'code'}, inplace = True)
casn_smiles_df.rename(columns = {'Substance_CASRN': 'casn', 'Structure_SMILES': 'smiles'}, inplace = True)
code_casn_df.set_index('code', inplace = True)
casn_smiles_df.set_index('casn', inplace= True)
#Loop through rows of hitc matrix and replace codes with smiles strings
badCounter = 0 #keep track of rows with no corresponding smiles strings
for index, data in hitc_code_df.iterrows():
rowList = data.values.tolist()
code = rowList[0]
#get corresponding casn
    try:
        casn = code_casn_df.loc[code, 'casn']
    except KeyError:
        badCounter += 1
        hitc_code_df.loc[index, 'code'] = np.nan  # no CASRN for this code; dropna removes the row later
        continue
    #get corresponding smiles
    try:
        smiles = casn_smiles_df.loc[casn, 'smiles']
    except KeyError:
        badCounter += 1
        hitc_code_df.loc[index, 'code'] = np.nan  # no SMILES for this CASRN; dropna removes the row later
        continue
#write to cell
hitc_code_df.loc[index, 'code'] = smiles
#Tidy up and write to csv
hitc_code_df.rename(columns = {'code': 'smiles'}, inplace = True)
hitc_code_df.dropna(subset = ['smiles'], inplace = True)
hitc_code_df.reset_index(inplace = True, drop = True)
hitc_code_df.to_csv("./reprocessed_tox_cast.csv", index=False)
| mit |
kalfasyan/DA224x | code/old code/parameters.py | 1 | 6311 | import random
import numpy as np
import itertools
import matplotlib.pylab as plt
from scipy import linalg as la
import time
from progressbar import *
from collections import Counter
import decimal
import math
import nest
nrn_type = "iaf_neuron"
exc_nrns_mc = 4
inh_nrns_mc = 1
lr_mc = 3
mc_hc = 4
hc = 3
nrns = (exc_nrns_mc+inh_nrns_mc)*hc*mc_hc*lr_mc
q = 1
sigma = math.sqrt(q/decimal.Decimal(nrns))
sigma2 = math.sqrt(1/decimal.Decimal(nrns))
mu = 0
nrns_hc = nrns/hc
nrns_mc = nrns_hc/mc_hc
nrns_l23 = nrns_mc*30/100
nrns_l4 = nrns_mc*20/100
nrns_l5 = nrns_mc*50/100
print nrns,"neurons."
print nrns_hc, "per hypercolumn in %s" %hc,"hypercolumns."
print nrns_mc, "per minicolumn in %s" %mc_hc,"minicolumns."
print nrns_l23, nrns_l4, nrns_l5, "in layers23 layer4 and layer5 respectively"
##############################################################
""" 2. Creating list of Hypercolumns, list of minicolumns within
hypercolumns, list of layers within minicolumns within
hypercolumns"""
split = [i for i in range(nrns)]
split_hc = zip(*[iter(split)]*nrns_hc)
split_mc = []
split_lr23,split_lr4,split_lr5 = [],[],[]
for i in range(len(split_hc)):
split_mc.append(zip(*[iter(split_hc[i])]*nrns_mc))
for j in range(len(split_mc[i])):
split_lr23.append(split_mc[i][j][0:nrns_l23])
split_lr4.append(split_mc[i][j][nrns_l23:nrns_l23+nrns_l4])
split_lr5.append(split_mc[i][j][nrns_l23+nrns_l4:])
split_exc,split_inh = [],[]
for i in range(len(split_lr23)):
split_exc.append(split_lr23[i][0:int(round(80./100.*(len(split_lr23[i]))))])
split_inh.append(split_lr23[i][int(round(80./100.*(len(split_lr23[i])))):])
for i in range(len(split_lr4)):
split_exc.append(split_lr4[i][0:int(round(80./100.*(len(split_lr4[i]))))])
split_inh.append(split_lr4[i][int(round(80./100.*(len(split_lr4[i])))):])
for i in range(len(split_lr5)):
split_exc.append(split_lr5[i][0:int(round(80./100.*(len(split_lr5[i]))))])
split_inh.append(split_lr5[i][int(round(80./100.*(len(split_lr5[i])))):])
##############################################################
""" 3. Creating sets for all minicolumns and all layers """
hypercolumns = set(split_hc)
minitemp = []
for i in range(len(split_mc)):
for j in split_mc[i]:
minitemp.append(j)
minicolumns = set(minitemp)
layers23 = set(list(itertools.chain.from_iterable(split_lr23)))
layers4 = set(list(itertools.chain.from_iterable(split_lr4)))
layers5 = set(list(itertools.chain.from_iterable(split_lr5)))
exc_nrns_set = set(list(itertools.chain.from_iterable(split_exc)))
inh_nrns_set = set(list(itertools.chain.from_iterable(split_inh)))
exc = [None for i in range(len(exc_nrns_set))]
inh = [None for i in range(len(inh_nrns_set))]
Nestrons = []
for i in range(nrns):
Nestrons.append(nest.Create(nrn_type))
#################### FUNCTIONS #####################################
""" Checks if 2 neurons belong in the same hypercolumn """
def same_hypercolumn(q,w):
for i in hypercolumns:
if q in i and w in i:
return True
return False
""" Checks if 2 neurons belong in the same minicolumn """
def same_minicolumn(q,w):
for mc in minicolumns:
if q in mc and w in mc:
return True
return False
""" Checks if 2 neurons belong in the same layer """
def same_layer(q,w):
if same_hypercolumn(q,w):
if q in layers23 and w in layers23:
return True
elif q in layers4 and w in layers4:
return True
elif q in layers5 and w in layers5:
return True
return False
def next_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
    # compare hypercolumn indices (not neuron ids) so every adjacent pair of
    # hypercolumns is checked
    for i in range(len(split_hc) - 1):
        if (q in split_hc[i] and w in split_hc[i+1]):
            return True
return False
def prev_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
for i in range(len(split_hc)):
for j in split_hc[i]:
if i >0:
if (q in split_hc[i] and w in split_hc[i-1]):
return True
return False
def diff_hypercolumns(q,w):
if next_hypercolumn(q,w):
if (q in layers5 and w in layers4):
return flip(0.20,q)
elif prev_hypercolumn(q,w):
if (q in layers5 and w in layers23):
return flip(0.20,q)
return 0
def both_exc(q,w):
if same_layer(q,w):
if (q in exc_nrns_set and w in exc_nrns_set):
return True
return False
def both_inh(q,w):
if same_layer(q,w):
if (q in inh_nrns_set and w in inh_nrns_set):
return True
return False
""" Returns 1 under probability 'p', else 0 (0<=p<=1)"""
def flipAdj(p,q):
if q in exc_nrns_set:
return 1 if random.random() < p else 0
elif q in inh_nrns_set:
return -1 if random.random() < p else 0
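""" flip: draws a weight from N(0, sigma), shifted by +0.5 when neuron q is
    excitatory and by -0.5 when q is inhibitory; the connection is realised
    with probability p, otherwise 0 is returned. """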
def flip(p,q):
if q in exc_nrns_set:
return (np.random.normal(0,sigma)+.5) if random.random() < p else 0
elif q in inh_nrns_set:
return (np.random.normal(0,sigma)-.5) if random.random() < p else 0
def flip2(p,q):
a = decimal.Decimal(0.002083333)
if q in exc_nrns_set:
return (abs(np.random.normal(0,a))) if random.random() < p else 0
elif q in inh_nrns_set:
return (-abs(np.random.normal(0,a))) if random.random() < p else 0
def check_zero(z):
unique, counts = np.unique(z, return_counts=True)
occurence = np.asarray((unique, counts)).T
for i in range(len(z)):
if np.sum(z) != 0:
if len(occurence)==3 and occurence[0][1]>occurence[2][1]:
if z[i] == -1:
z[i] = 0
elif len(occurence)==3 and occurence[2][1]>occurence[0][1]:
if z[i] == 1:
z[i] = 0
elif len(occurence) < 3:
if z[i] == -1:
z[i] += 1
if z[i] == 1:
z[i] -= 1
else:
return z
def balance(l):
N = len(l)
meanP, meanN = 0,0
c1, c2 = 0,0
for i in range(N):
if l[i] > 0:
meanP += l[i]
c1+=1
if l[i] < 0:
meanN += l[i]
c2+=1
diff = abs(meanP)-abs(meanN)
for i in range(N):
if l[i] < 0:
l[i] -= diff/(c2)
return l
""" Total sum of conn_matrix weights becomes zero """
def balanceN(mat):
N = len(mat)
sumP,sumN = 0,0
c,c2=0,0
for i in range(N):
for j in range(N):
if mat[j][i] > 0:
sumP += mat[j][i]
c+=1
elif mat[j][i] < 0:
sumN += mat[j][i]
c2+=1
diff = sumP + sumN
for i in range(N):
for j in range(N):
if mat[j][i] < 0:
mat[j][i] -= diff/c2
""" Returns a counter 'c' in case a number 'n' is not (close to) zero """
def check_count(c, n):
if n <= -1e-4 or n>= 1e-4:
c+=1
        return c
| gpl-2.0 |
TakakiNishio/grasp_planning | rgb/random_planning/random_search_v2.py | 1 | 6420 | # -*- coding: utf-8 -*-
#python library
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import *
import sys
from scipy import misc
from PIL import Image, ImageDraw, ImageFont
import shutil
import os
import random
#chainer library
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer import serializers
#pygame library
import pygame
from pygame.locals import *
#python script
import network_structure as nn
import pickup_object as po
import path as p
# label preparation
def label_handling(data_label_1,data_label_2):
data_label = []
if data_label_1 < 10 :
data_label.append(str(0)+str(data_label_1))
else:
data_label.append(str(data_label_1))
if data_label_2 < 10 :
data_label.append(str(0)+str(data_label_2))
else:
data_label.append(str(data_label_2))
return data_label
# load picture data
def load_picture(path,scale):
img =Image.open(path)
resize_img = img.resize((img.size[0]/scale,img.size[1]/scale))
img_array = np.asanyarray(resize_img,dtype=np.float32)
img_shape = img_array.shape
img_array = np.reshape(img_array,(img_shape[2]*img_shape[1]*img_shape[0],1))
img_list = []
for i in range(len(img_array)):
img_list.append(img_array[i][0]/255.0)
return img_list
# generate grasp rectangle randomly
def random_rec(object_area,scale):
theta = random.uniform(-1,1) * np.pi
if len(object_area)==0:
area_index = 0
else:
area_index = randint(0,len(object_area))
xc_yc = []
xc = randint(object_area[area_index][0],object_area[area_index][0]+object_area[area_index][2])
yc = randint(object_area[area_index][1],object_area[area_index][1]+object_area[area_index][3])
xc_yc.append(xc)
xc_yc.append(yc)
a = randint(30,80)
#b = randint(10,30)
b = 20
x1 = a*np.cos(theta)-b*np.sin(theta)+xc
y1 = a*np.sin(theta)+b*np.cos(theta)+yc
x2 = -a*np.cos(theta)-b*np.sin(theta)+xc
y2 = -a*np.sin(theta)+b*np.cos(theta)+yc
x3 = -a*np.cos(theta)+b*np.sin(theta)+xc
y3 = -a*np.sin(theta)-b*np.cos(theta)+yc
x4 = a*np.cos(theta)+b*np.sin(theta)+xc
y4 = a*np.sin(theta)-b*np.cos(theta)+yc
rec_list = []
rec_list.append(round(x1/scale,2))
rec_list.append(round(y1/scale,2))
rec_list.append(round(x2/scale,2))
rec_list.append(round(y2/scale,2))
rec_list.append(round(x3/scale,2))
rec_list.append(round(y3/scale,2))
rec_list.append(round(x4/scale,2))
rec_list.append(round(y4/scale,2))
return rec_list,xc_yc,theta
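# The four (x, y) pairs returned above are the corners of a rectangle with
# half-length `a` and half-width `b`, rotated by `theta` around the centre
# (xc, yc); the coordinates are divided by `scale` to match the downscaled
# network input.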
# generate input data for CNN
def input_data(path,rec_list,scale):
img_list = load_picture(path,scale)
x = rec_list + img_list
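    # 57608 = 8 rectangle coordinates + 160*120*3 image values
    # (the 640x480 RGB image downscaled by scale=4 and flattened)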
x = np.array(x,dtype=np.float32).reshape((1,57608))
return x
# draw grasp rectangle
def draw_grasp_rectangle(color1,color2):
    # use the colours passed in as arguments instead of the module-level
    # rec_color1/rec_color2 globals
    pygame.draw.line(screen, color1, (x[0][0]*scale,x[0][1]*scale), (x[0][2]*scale,x[0][3]*scale),5)
    pygame.draw.line(screen, color2, (x[0][2]*scale,x[0][3]*scale), (x[0][4]*scale,x[0][5]*scale),5)
    pygame.draw.line(screen, color1, (x[0][4]*scale,x[0][5]*scale), (x[0][6]*scale,x[0][7]*scale),5)
    pygame.draw.line(screen, color2, (x[0][6]*scale,x[0][7]*scale), (x[0][0]*scale,x[0][1]*scale),5)
# write text
def captions(dir_n,pic_n,mss,rc,cnt,rad,f1,f2,f3):
text1 = f1.render("directory_n: "+str(dir_n)+" picture_n: "+str(pic_n), True, (255,255,255))
text2 = f1.render("quit: ESC", True, (255,255,255))
text3 = f2.render(mss, True, (255,0,0))
text4 = f3.render("rectangle(scaled):", True, (255,0,0))
text5 = f3.render(" "+str(rc), True, (255,0,0))
text6 = f3.render("center_point: "+str(cnt)+", angle [deg]: "+str(round(rad*(180/np.pi),2)), True, (255,0,0))
screen.blit(text1, [20, 20])
screen.blit(text2, [20, 50])
screen.blit(text3, [80, 80])
screen.blit(text4, [20, 400])
screen.blit(text5, [20, 420])
screen.blit(text6, [20, 450])
#main
if __name__ == '__main__':
#directory_n = 3
#picture_n = 77
#demo
directory_n = 5
picture_n = 77
# random checking
#directory_n = randint(7)+1
#picture_n = randint(98)+1
    # multiple object rectangles will appear
#directory_n = 7
#picture_n = 80
# "yellow plate"
#directory_n = 3
#picture_n = 88
scale = 4
print 'directory:'+str(directory_n)+' picture:'+str(picture_n)
data_label = label_handling(directory_n,picture_n)
path = p.data_path()+data_label[0]+'/pcd'+data_label[0]+data_label[1]+'r.png'
model = nn.CNN_classification3()
serializers.load_npz('cnn03a.model', model)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
screen_size = (640, 480)
pygame.init()
pygame.display.set_mode(screen_size)
pygame.display.set_caption("random search")
screen = pygame.display.get_surface()
bg = pygame.image.load(path).convert_alpha()
rect_bg = bg.get_rect()
pygame.font.init()
font1 = pygame.font.Font(None, 30)
font2 = pygame.font.Font(None, 40)
font3 = pygame.font.Font(None, 25)
search_area = po.find_object(path)
while (1):
rec,center,angle = random_rec(search_area,scale)
x = input_data(path,rec,scale)
test_output = model.forward(chainer.Variable(x))
test_label = np.argmax(test_output.data[0])
pygame.display.update()
pygame.time.wait(500)
screen.fill((0, 0, 0))
screen.blit(bg, rect_bg)
# draw object area
for i in range(len(search_area)):
pygame.draw.rect(screen, (120,120,255), Rect(search_area[i]),3)
# draw grasp rectangle
if test_label == 1:
rec_color1 = (255,255,0)
rec_color2 = (0,255,0)
message = "evaluation: graspable"
else:
rec_color1 = (255,0,0)
rec_color2 = (0,0,255)
message = "evaluation: non-graspable"
draw_grasp_rectangle(rec_color1,rec_color2)
captions(directory_n,picture_n,message,rec,center,angle,font1,font2,font3)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
| gpl-3.0 |
Sentient07/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/tests/test_metaestimators.py | 30 | 5040 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
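    # `hides` turns a method into a property that raises AttributeError when
    # the instance was constructed with hidden_method=<that method's name>,
    # so hasattr() reports the method as missing; this lets the test toggle
    # the delegate's interface one method at a time.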
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
check_is_fitted(self, 'coef_')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises a NotFittedError
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
kandy-koblenz/people-networks | wikipedia-crawl/profile-reading-reworked.py | 1 | 3776 | import pickle
import pandas as pd
from mwclient import Site
import datetime
import time
import os
import wikitextparser as wtp
import sys
import csv
from create_profile_reading_tracker import create_profile_reading_tracker
#prev_size = 1 #DEBUG checking the size of dict
file_name = 'politician-data'
tracker_file = file_name+'-tracker.csv' # make sure the file is in parallel to this program
parsed_tracker = 'parsed_articles.csv'
'''
To store the profile data. If the folder is not present it will be created.
No path delimiters please; don't use //. '''
base_path = 'profile-data'
user_agent = 'Uni Koblenz-Landau student, vasilev@uni-koblenz.de'
wiki = Site(host='en.wikipedia.org', clients_useragent=user_agent)
if(not os.path.exists(base_path)):
os.makedirs(base_path)
#if the tracker file does not exists then create one
if not os.path.exists(tracker_file):
create_profile_reading_tracker(file_name, tracker_file)
if not os.path.exists(parsed_tracker):
parsed_articles = open(parsed_tracker,'w')
pw = csv.writer(parsed_articles,lineterminator='\n') #Csv writer
pw.writerows([['ind','handle','ID','finished_reading','time_taken_in_mins']]) # Writing column names
parsed_articles.close()
#parsed_articles = open(parsed_tracker,'w') #Opening csv file to store information about parsed articles
#pw = csv.writer(parsed_articles) #Csv writer
#pw.writerows([,'handle','ID','finished_reading','time_taken_in_mins']) # Writing column names
def read_profile_tracker() :
global profile_tracker
profile_tracker = pd.read_csv(tracker_file,encoding='ISO-8859-1')
profile_tracker = profile_tracker[['handle','ID']]#,'finished_reading','time_taken_in_mins']]
ptr = pd.read_csv(parsed_tracker,encoding='ISO-8859-1',index_col='ind')
profile_tracker = profile_tracker.iloc[ptr.shape[0]:]
"""
def get_unread_profile(profile_tracker):
data_to_be_read = profile_tracker[profile_tracker['finished_reading'] == False]
if(data_to_be_read.shape[0] > 0) :
#returns the name and ID
return [data_to_be_read.iloc[0]['handle'],data_to_be_read.iloc[0]['ID']]
else:
return None
def write_read_profile(profile_tracker, profile,time_taken):
# this could be improved - rather than filtering two times - get the row handle and update it
pt = profile_tracker[profile_tracker['handle'] == profile]#.index[0]
#profile_tracker.set_value(ptindex, 'finished_reading', True)
#profile_tracker.set_value(ptindex, 'time_taken_in_mins', time_taken)
#profile_tracker.loc[profile_tracker['handle'] == profile, 'finished_reading'] = True
#profile_tracker.loc[profile_tracker['handle'] == profile, 'time_taken_in_mins'] = time_taken
#profile_tracker.to_csv(tracker_file)
parsed_articles = open(parsed_tracker,'a')
pw = csv.writer(parsed_articles) #Csv writer
pw.writerows([pt.index[0],profile,ptiloc[0,1],True,time_taken]) #Writing down info about parsed file
parsed_articles.close()
"""
start_time = time.time()
profile_count = 0
profile_tracker = None
read_profile_tracker()
# Init date list (build backwards, because revisions are in backwards order as well)
# -> we are going back in time
dates = []
for year in range(2016,2000,-1):
for month in range(12,0,-1):
dates.append({'year':year, 'month':month})
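# e.g. dates[0:3] == [{'year': 2016, 'month': 12}, {'year': 2016, 'month': 11},
#                     {'year': 2016, 'month': 10}]  (newest month first)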
from parse_one_article import parse_one_article
for ind, row in profile_tracker.iterrows():
profile_count += 1
unread_profile = [row['handle'],row['ID']]
# Init biography page and output dict
parse_one_article(unread_profile,dates,ind,base_path,parsed_tracker,profile_count)
end_time = time.time()
print('Total Time taken (in mins)-',(end_time - start_time) / 60)
print('No. of profiles read :',profile_count)
| mit |