repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values |
---|---|---|---|---|---|
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/linear_model/sag.py | 29 | 11291 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, where n_samples is the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared Euclidean (L2) norm.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
criterion is not reached. Defaults to 1000.
tol: double, optional
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. It also contains other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| unlicense |
rkmaddox/mne-python | tutorials/preprocessing/45_projectors_background.py | 10 | 22379 | # -*- coding: utf-8 -*-
"""
.. _tut-projectors-background:
Background on projectors and projections
========================================
This tutorial provides background information on projectors and Signal Space
Projection (SSP), and covers loading and saving projectors, adding and removing
projectors from Raw objects, the difference between "applied" and "unapplied"
projectors, and at what stages MNE-Python applies projectors automatically.
We'll start by importing the Python modules we need; we'll also define a short
function to make it easier to make several plots that look similar:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.linalg import svd
import mne
def setup_3d_axes():
ax = plt.axes(projection='3d')
ax.view_init(azim=-105, elev=20)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
ax.set_zlim(0, 5)
return ax
###############################################################################
# What is a projection?
# ^^^^^^^^^^^^^^^^^^^^^
#
# In the most basic terms, a *projection* is an operation that converts one set
# of points into another set of points, where repeating the projection
# operation on the resulting points has no effect. To give a simple geometric
# example, imagine the point :math:`(3, 2, 5)` in 3-dimensional space. A
# projection of that point onto the :math:`x, y` plane looks a lot like a
# shadow cast by that point if the sun were directly above it:
ax = setup_3d_axes()
# plot the vector (3, 2, 5)
origin = np.zeros((3, 1))
point = np.array([[3, 2, 5]]).T
vector = np.hstack([origin, point])
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
# project the vector onto the x,y plane and plot it
xy_projection_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]])
projected_point = xy_projection_matrix @ point
projected_vector = xy_projection_matrix @ vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1, color='C1',
linewidth=1, linestyle='dashed')
###############################################################################
#
# .. note::
#
# The ``@`` symbol indicates matrix multiplication on NumPy arrays, and was
# introduced in Python 3.5 / NumPy 1.10. The notation ``plot(*point)`` uses
# Python `argument expansion`_ to "unpack" the elements of ``point`` into
# separate positional arguments to the function. In other words,
# ``plot(*point)`` expands to ``plot(3, 2, 5)``.
#
# Notice that we used matrix multiplication to compute the projection of our
# point :math:`(3, 2, 5)` onto the :math:`x, y` plane:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 5 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# ...and that applying the projection again to the result just gives back the
# result again:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# From an information perspective, this projection has taken the point
# :math:`x, y, z` and removed the information about how far in the :math:`z`
# direction our point was located; all we know now is its position in the
# :math:`x, y` plane. Moreover, applying our projection matrix to *any point*
# in :math:`x, y, z` space will reduce it to a corresponding point on the
# :math:`x, y` plane. The term for this is a *subspace*: the projection matrix
# projects points in the original space into a *subspace* of lower dimension
# than the original. The reason our subspace is the :math:`x,y` plane (instead
# of, say, the :math:`y,z` plane) is a direct result of the particular values
# in our projection matrix.
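#
# As a quick numeric check of the two claims above (idempotence and the drop
# to two effective dimensions), reusing the ``xy_projection_matrix`` and
# ``point`` arrays defined earlier, one could run:
#
# .. code-block:: python3
#
#     once = xy_projection_matrix @ point
#     twice = xy_projection_matrix @ once
#     print(np.allclose(once, twice))                     # True
#     print(np.linalg.matrix_rank(xy_projection_matrix))  # 2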
#
#
# Example: projection as noise reduction
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another way to describe this "loss of information" or "projection into a
# subspace" is to say that projection reduces the rank (or "degrees of
# freedom") of the measurement — here, from 3 dimensions down to 2. On the
# other hand, if you know that measurement component in the :math:`z` direction
# is just noise due to your measurement method, and all you care about are the
# :math:`x` and :math:`y` components, then projecting your 3-dimensional
# measurement into the :math:`x, y` plane could be seen as a form of noise
# reduction.
#
# Of course, it would be very lucky indeed if all the measurement noise were
# concentrated in the :math:`z` direction; you could just discard the :math:`z`
# component without bothering to construct a projection matrix or do the matrix
# multiplication. Suppose instead that in order to take that measurement you
# had to pull a trigger on a measurement device, and the act of pulling the
# trigger causes the device to move a little. If you measure how
# trigger-pulling affects measurement device position, you could then "correct"
# your real measurements to "project out" the effect of the trigger pulling.
# Here we'll suppose that the average effect of the trigger is to move the
# measurement device by :math:`(3, -1, 1)`:
trigger_effect = np.array([[3, -1, 1]]).T
###############################################################################
# Knowing that, we can compute a plane that is orthogonal to the effect of the
# trigger (using the fact that a plane through the origin has equation
# :math:`Ax + By + Cz = 0` given a normal vector :math:`(A, B, C)`), and
# project our real measurements onto that plane.
# compute the plane orthogonal to trigger_effect
x, y = np.meshgrid(np.linspace(-1, 5, 61), np.linspace(-1, 5, 61))
A, B, C = trigger_effect
z = (-A * x - B * y) / C
# cut off the plane below z=0 (just to make the plot nicer)
mask = np.where(z >= 0)
x = x[mask]
y = y[mask]
z = z[mask]
###############################################################################
# Computing the projection matrix from the ``trigger_effect`` vector is done
# using `singular value decomposition <svd_>`_ (SVD); interested readers may
# consult the internet or a linear algebra textbook for details on this method.
# With the projection matrix in place, we can project our original vector
# :math:`(3, 2, 5)` to remove the effect of the trigger, and then plot it:
# sphinx_gallery_thumbnail_number = 2
# compute the projection matrix
U, S, V = svd(trigger_effect, full_matrices=False)
trigger_projection_matrix = np.eye(3) - U @ U.T
# project the vector onto the orthogonal plane
projected_point = trigger_projection_matrix @ point
projected_vector = trigger_projection_matrix @ vector
# plot the trigger effect and its orthogonal plane
ax = setup_3d_axes()
ax.plot_trisurf(x, y, z, color='C2', shade=False, alpha=0.25)
ax.quiver3D(*np.concatenate([origin, trigger_effect]).flatten(),
arrow_length_ratio=0.1, color='C2', alpha=0.5)
# plot the original vector
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
offset = np.full((3, 1), 0.1)
ax.text(*(point + offset).flat, '({}, {}, {})'.format(*point.flat), color='k')
# plot the projected vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
offset = np.full((3, 1), -0.2)
ax.text(*(projected_point + offset).flat,
'({}, {}, {})'.format(*np.round(projected_point.flat, 2)),
color='C0', horizontalalignment='right')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1,
color='C1', linewidth=1, linestyle='dashed')
###############################################################################
# Just as before, the projection matrix will map *any point* in :math:`x, y, z`
# space onto that plane, and once a point has been projected onto that plane,
# applying the projection again will have no effect. For that reason, it should
# be clear that although the projected points vary in all three :math:`x`,
# :math:`y`, and :math:`z` directions, the set of projected points have only
# two *effective* dimensions (i.e., they are constrained to a plane).
#
# .. sidebar:: Terminology
#
# In MNE-Python, the matrix used to project a raw signal into a subspace is
# usually called a :term:`projector` or a *projection
# operator* — these terms are interchangeable with the term *projection
# matrix* used above.
#
# Projections of EEG or MEG signals work in very much the same way: the point
# :math:`x, y, z` corresponds to the value of each sensor at a single time
# point, and the projection matrix varies depending on what aspects of the
# signal (i.e., what kind of noise) you are trying to project out. The only
# real difference is that instead of a single 3-dimensional point :math:`(x, y,
# z)` you're dealing with a time series of :math:`N`-dimensional "points" (one
# at each sampling time), where :math:`N` is usually in the tens or hundreds
# (depending on how many sensors your EEG/MEG system has). Fortunately, because
# projection is a matrix operation, it can be done very quickly even on signals
# with hundreds of dimensions and tens of thousands of time points.
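#
# A minimal sketch of that idea, using random numbers in place of real
# EEG/MEG data (the array names below are illustrative, not MNE API):
#
# .. code-block:: python3
#
#     n_channels, n_times = 100, 10000
#     data = np.random.randn(n_channels, n_times)    # fake sensor recording
#     noise_dir = np.random.randn(n_channels, 1)     # one noise "direction"
#     u, _, _ = svd(noise_dir, full_matrices=False)
#     projector = np.eye(n_channels) - u @ u.T       # same recipe as above
#     cleaned = projector @ data                     # one matrix product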
#
#
# .. _ssp-tutorial:
#
# Signal-space projection (SSP)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We mentioned above that the projection matrix will vary depending on what
# kind of noise you are trying to project away. Signal-space projection (SSP)
# :footcite:`UusitaloIlmoniemi1997` is a way of estimating what that projection
# matrix should be, by
# comparing measurements with and without the signal of interest. For example,
# you can take additional "empty room" measurements that record activity at the
# sensors when no subject is present. By looking at the spatial pattern of
# activity across MEG sensors in an empty room measurement, you can create one
# or more :math:`N`-dimensional vector(s) giving the "direction(s)" of
# environmental noise in sensor space (analogous to the vector for "effect of
# the trigger" in our example above). SSP is also often used for removing
# heartbeat and eye movement artifacts — in those cases, instead of empty room
# recordings the direction of the noise is estimated by detecting the
# artifacts, extracting epochs around them, and averaging. See
# :ref:`tut-artifact-ssp` for examples.
#
# Once you know the noise vectors, you can create a hyperplane that is
# orthogonal
# to them, and construct a projection matrix to project your experimental
# recordings onto that hyperplane. In that way, the component of your
# measurements associated with environmental noise can be removed. Again, it
# should be clear that the projection reduces the dimensionality of your data —
# you'll still have the same number of sensor signals, but they won't all be
# *linearly independent* — but typically there are tens or hundreds of sensors
# and the noise subspace that you are eliminating has only 3-5 dimensions, so
# the loss of degrees of freedom is usually not problematic.
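#
# Sketched in code (with made-up numbers standing in for real empty-room
# measurements), building a projector from several estimated noise vectors
# looks just like the single-vector trigger example above:
#
# .. code-block:: python3
#
#     n_channels, n_noise_dims = 100, 3
#     noise_vectors = np.random.randn(n_channels, n_noise_dims)
#     u, _, _ = svd(noise_vectors, full_matrices=False)
#     ssp_projector = np.eye(n_channels) - u @ u.T
#     print(np.linalg.matrix_rank(ssp_projector))    # n_channels - n_noise_dims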
#
#
# Projectors in MNE-Python
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# In our example data, :ref:`SSP <ssp-tutorial>` has already been performed
# using empty room recordings, but the :term:`projectors <projector>` are
# stored alongside the raw data and have not been *applied* yet (or,
# synonymously, the projectors are not *active* yet). Here we'll load
# the :ref:`sample data <sample-dataset>` and crop it to 60 seconds; you can
# see the projectors in the output of :func:`~mne.io.read_raw_fif` below:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# In MNE-Python, the environmental noise vectors are computed using `principal
# component analysis <pca_>`_, usually abbreviated "PCA", which is why the SSP
# projectors usually have names like "PCA-v1". (Incidentally, since the process
# of performing PCA uses `singular value decomposition <svd_>`_ under the hood,
# it is also common to see phrases like "projectors were computed using SVD" in
# published papers.) The projectors are stored in the ``projs`` field of
# ``raw.info``:
print(raw.info['projs'])
###############################################################################
# ``raw.info['projs']`` is an ordinary Python :class:`list` of
# :class:`~mne.Projection` objects, so you can access individual projectors by
# indexing into it. The :class:`~mne.Projection` object itself is similar to a
# Python :class:`dict`, so you can use its ``.keys()`` method to see what
# fields it contains (normally you don't need to access its properties
# directly, but you can if necessary):
first_projector = raw.info['projs'][0]
print(first_projector)
print(first_projector.keys())
###############################################################################
# The :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`
# objects all have a boolean :attr:`~mne.io.Raw.proj` attribute that indicates
# whether there are any unapplied / inactive projectors stored in the object.
# In other words, the :attr:`~mne.io.Raw.proj` attribute is ``True`` if at
# least one :term:`projector` is present and all of them are active. In
# addition, each individual projector also has a boolean ``active`` field:
print(raw.proj)
print(first_projector['active'])
###############################################################################
# Computing projectors
# ~~~~~~~~~~~~~~~~~~~~
#
# In MNE-Python, SSP vectors can be computed using general purpose functions
# :func:`mne.compute_proj_raw`, :func:`mne.compute_proj_epochs`, and
# :func:`mne.compute_proj_evoked`. The general assumption these functions make
# is that the data passed contains raw data, epochs or averages of the artifact
# you want to repair via projection. In practice this typically involves
# continuous raw data of empty room recordings or averaged ECG or EOG
# artifacts. A second set of high-level convenience functions is provided to
# compute projection vectors for typical use cases. This includes
# :func:`mne.preprocessing.compute_proj_ecg` and
# :func:`mne.preprocessing.compute_proj_eog` for computing the ECG and EOG
# related artifact components, respectively; see :ref:`tut-artifact-ssp` for
# examples of these uses. For computing the EEG reference signal as a
# projector, the function :func:`mne.set_eeg_reference` can be used; see
# :ref:`tut-set-eeg-ref` for more information.
#
# .. warning:: It is best to compute projectors only on channels that will be
# used (e.g., excluding bad channels). This ensures that
# projection vectors will remain ortho-normalized and that they
# properly capture the activity of interest.
#
#
# Visualizing the effect of projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can see the effect the projectors are having on the measured signal by
# comparing plots with and without the projectors applied. By default,
# ``raw.plot()`` will apply the projectors in the background before plotting
# (without modifying the :class:`~mne.io.Raw` object); you can control this
# with the boolean ``proj`` parameter as shown below, or you can turn them on
# and off interactively with the projectors interface, accessed via the
# :kbd:`Proj` button in the lower right corner of the plot window. Here we'll
# look at just the magnetometers, and a 2-second sample from the beginning of
# the file.
mags = raw.copy().crop(tmax=2).pick_types(meg='mag')
for proj in (False, True):
fig = mags.plot(butterfly=True, proj=proj)
fig.subplots_adjust(top=0.9)
fig.suptitle('proj={}'.format(proj), size='xx-large', weight='bold')
###############################################################################
# Additional ways of visualizing projectors are covered in the tutorial
# :ref:`tut-artifact-ssp`.
#
#
# Loading and saving projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# SSP can be used for other types of signal cleaning besides just reduction of
# environmental noise. You probably noticed two large deflections in the
# magnetometer signals in the previous plot that were not removed by the
# empty-room projectors — those are artifacts of the subject's heartbeat. SSP
# can be used to remove those artifacts as well. The sample data includes
# projectors for heartbeat noise reduction that were saved in a separate file
# from the raw data, which can be loaded with the :func:`mne.read_proj`
# function:
ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
print(ecg_projs)
###############################################################################
# There is a corresponding :func:`mne.write_proj` function that can be used to
# save projectors to disk in ``.fif`` format:
#
# .. code-block:: python3
#
# mne.write_proj('heartbeat-proj.fif', ecg_projs)
#
# .. note::
#
# By convention, MNE-Python expects projectors to be saved with a filename
# ending in ``-proj.fif`` (or ``-proj.fif.gz``), and will issue a warning
# if you forgo this recommendation.
#
#
# Adding and removing projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Above, when we printed the ``ecg_projs`` list that we loaded from a file, it
# showed two projectors for gradiometers (the first two, marked "planar"), two
# for magnetometers (the middle two, marked "axial"), and two for EEG sensors
# (the last two, marked "eeg"). We can add them to the :class:`~mne.io.Raw`
# object using the :meth:`~mne.io.Raw.add_proj` method:
raw.add_proj(ecg_projs)
###############################################################################
# To remove projectors, there is a corresponding method
# :meth:`~mne.io.Raw.del_proj` that will remove projectors based on their index
# within the ``raw.info['projs']`` list. For the special case of replacing the
# existing projectors with new ones, use
# ``raw.add_proj(ecg_projs, remove_existing=True)``.
#
# To see how the ECG projectors affect the measured signal, we can once again
# plot the data with and without the projectors applied (though remember that
# the :meth:`~mne.io.Raw.plot` method only *temporarily* applies the projectors
# for visualization, and does not permanently change the underlying data).
# We'll compare the ``mags`` variable we created above, which had only the
# empty room SSP projectors, to the data with both empty room and ECG
# projectors:
mags_ecg = raw.copy().crop(tmax=2).pick_types(meg='mag')
for data, title in zip([mags, mags_ecg], ['Without', 'With']):
fig = data.plot(butterfly=True, proj=True)
fig.subplots_adjust(top=0.9)
fig.suptitle('{} ECG projector'.format(title), size='xx-large',
weight='bold')
###############################################################################
# When are projectors "applied"?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, projectors are applied when creating :class:`epoched
# <mne.Epochs>` data from :class:`~mne.io.Raw` data, though application of the
# projectors can be *delayed* by passing ``proj=False`` to the
# :class:`~mne.Epochs` constructor. However, even when projectors have not been
# applied, the :meth:`mne.Epochs.get_data` method will return data *as if the
# projectors had been applied* (though the :class:`~mne.Epochs` object will be
# unchanged). Additionally, projectors cannot be applied if the data are not
# :ref:`preloaded <memory>`. If the data are `memory-mapped`_ (i.e., not
# preloaded), you can check the ``_projector`` attribute to see whether any
# projectors will be applied once the data is loaded in memory.
#
# Finally, when performing inverse imaging (i.e., with
# :func:`mne.minimum_norm.apply_inverse`), the projectors will be
# automatically applied. It is also possible to apply projectors manually when
# working with :class:`~mne.io.Raw`, :class:`~mne.Epochs` or
# :class:`~mne.Evoked` objects via the object's :meth:`~mne.io.Raw.apply_proj`
# method. For all instance types, you can always copy the contents of
# :samp:`{<instance>}.info['projs']` into a separate :class:`list` variable,
# use :samp:`{<instance>}.del_proj({<index of proj(s) to remove>})` to remove
# one or more projectors, and then add them back later with
# :samp:`{<instance>}.add_proj({<list containing projs>})` if desired.
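#
# A rough sketch of that round trip, using the ``raw`` object from above (the
# index ``0`` is only illustrative):
#
# .. code-block:: python3
#
#     saved_projs = list(raw.info['projs'])             # keep a copy
#     raw.del_proj(0)                                   # drop one projector
#     raw.add_proj(saved_projs, remove_existing=True)   # restore the full set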
#
# .. warning::
#
# Remember that once a projector is applied, it can't be un-applied, so
# during interactive / exploratory analysis it's a good idea to use the
# object's :meth:`~mne.io.Raw.copy` method before applying projectors.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# In general, it is recommended to apply projectors when creating
# :class:`~mne.Epochs` from :class:`~mne.io.Raw` data. There are two reasons
# for this recommendation:
#
# 1. It is computationally cheaper to apply projectors to data *after* the
# data have been reduced to just the segments of interest (the epochs)
#
# 2. If you are applying amplitude-based rejection criteria to epochs, it is
# preferable to reject based on the signal *after* projectors have been
# applied, because the projectors may reduce noise in some epochs to
# tolerable levels (thereby increasing the number of acceptable epochs and
# consequently increasing statistical power in any later analyses).
#
#
# References
# ^^^^^^^^^^
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`argument expansion`:
# https://docs.python.org/3/tutorial/controlflow.html#tut-unpacking-arguments
# .. _`pca`: https://en.wikipedia.org/wiki/Principal_component_analysis
# .. _`svd`: https://en.wikipedia.org/wiki/Singular_value_decomposition
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| bsd-3-clause |
amitsandhel/PA273-Potentiostat-Software | test/test_pa273_v2.py | 1 | 6556 | #!/usr/bin/python
# encoding: utf-8
# test_pa273_v2.py
'''
Created by Amit Sandhel.
This module tests the pa273_v2.py script.
'''
import sys
import os
import unittest
import time
import logging
#import pprint
#pp = pprint.pprint
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
#print "\nNew System Path:"
#pp(sys.path)
# importing the pa273_v2 module for testing
import pa273_v2
pa273_v2.Serial = pa273_v2.Fake_Serial
class pa273_v2test(unittest.TestCase):
'''unittest class for testing and TDD of the pa273_v2.py script ONLY'''
def setUp(self):
self.m = pa273_v2.MySerialPort()
self.m.open_port()
self.debug = True
self.logger = logging.getLogger('MAIN')
def tearDown(self):
self.m.close_port()
def diag_print(self, msg='\n'):
if self.debug:
print (msg)
def test01(self):
''' Test basic command list parsing '''
test_commands = (
("0, BIAS -200"),
('100, BIASS'),
('200, BIAS -300'),
('300, BIASS'),
('400, BIAS 400'),
("500, BIASS"),
)
self.m.parse_commands(test_commands)
self.assertEqual(len(test_commands), len(self.m.command_dict))
for item in test_commands:
time, cmd = item.strip().split(",")
self.assertEqual(self.m.command_dict[float(time)], (cmd.strip(),))
def test02(self):
''' testing the readfiles command'''
'''verifying the right file is even opened to begin with'''
file2 = None
#self.b = self.m.readfiles()
self.assertEqual(file2, None) #self.m.readfiles())
def test03(self):
'''testing the read_command() function
specifically the self.cmd_output variable
'''
self.m.command_dict = {0:("BIAS 200",), 2: ("BIAS 400",), 3:("BIAS 300",) }
sorted_list = [0, 2,3]
self.m.read_command()
self.assertEqual(sorted_list, self.m.cmd_output)
def test04(self):
''' testing the get_next_command()
testing the efficiency of the reply command and that it
is reporting the value that is expected with the correct
time index
'''
self.m.command_dict = {0:("BIAS 200",), 3: ("BIAS 500",), 5:("BIAS 300",),4: ("BIAS 400",)}
self.m.cmd_output = [0,3,4,5]
keylist = [0,3,4,5]
cmd = [('BIAS 200',), ('BIAS 500',),('BIAS 400',),('BIAS 300',)]
for x in range(4):
a = keylist.pop(0)
b = cmd.pop(0)
reply2 = (a,b)
next_time, next_cmd = self.m.get_next_command()
reply = (next_time, next_cmd)
self.assertEqual(reply2, reply)
def test05(self):
''' testing the while function in get_next_command
itself with a correct timer
'''
self.m.command_dict = {1: ("BIAS 200",), 3: ("BIASS",), 8:("BIAS 300",), 6: ("BIAS 400",)}
self.m.cmd_output = [1,3,6,8]
# using this keylist_output and the cmdlist_output lists to compare the outputs from the next_time and next_cmd if loop
keylist_output = [1.0,3.0,6.0,8.0]
cmdlist_output = [('BIAS 200',), ('BIASS',),('BIAS 400',),('BIAS 300',)]
start_time = time.time()
new_time = 0
while True:
elapsed_time = time.time() - start_time
#next_time, next_cmd = self.m.get_next_command()
#if start_time > 0:
if elapsed_time >= new_time:
if len(self.m.cmd_output) == 0:
self.assertEqual(0, len(self.m.cmd_output))
break
# else:
next_time, next_cmd = self.m.get_next_command()
reply = (next_time, next_cmd)
#print (time.time(), reply)
new_time = reply[0]
self.assertEqual(keylist_output.pop(0), next_time)
self.assertEqual(cmdlist_output.pop(0), next_cmd)
def test06(self):
'''testing the time_meter_command() function which
is the while true loop found in test05 which is rebuilt
'''
self.m.command_dict = {1: ("BIAS 200",), 3: ("BIAS 500",), 5:("BIAS 300",), 4: ("BIAS 400",)}
self.m.cmd_output = [1,3,4,5]
# using this keylist_output and the cmdlist_output lists to compare the outputs from the next_time and next_cmd if loop
keylist_output = [1.0,3.0,4.0,5.0]
cmdlist_output = [(1.0,('BIAS 200',)), (3.0, ('BIAS 500',)),(4.0, ('BIAS 400',)),(5.0, ('BIAS 300',))]
for x in range(3):
next_time = self.m.run() #time_meter_command()
break
self.assertEqual(cmdlist_output.pop(0), next_time)
def test07(self):
'''testing the execute_command()
with the simulator
'''
self.m.command_dict = {"2": ("BIAS 200",), "3": ("BIAS 250",), "4":("BIAS 300",),"5": ("BIAS 400",),
"6": ("BIAS 500",), "7": ("BIAS 550",), "10":("BIAS 600",), "15": ("BIAS 700",)}
self.m.cmd_output = ['2', '3']
#self.m.run() #time_meter_command()
def test08(self):
'''testing the entire time_meter_command() function
simply by running the time_meter_command() function as is
'''
self.m.run() #time_meter_command()
def test09(self):
'''converted test10() into a class and moved it upwards; the class works successfully.
Now onwards to real time.'''
#self.n.make_graph()
'''
IMPORTANT LINKS, PLEASE LOOK AT
ESPECIALLY THE TOP THREE WEBSITES
http://stackoverflow.com/questions/17518085/python-real-time-plotting-memory-leak
http://stackoverflow.com/questions/17039901/plot-time-values-with-matplotlib
http://synesthesiam.com/posts/an-exercise-with-functions-and-plotting.html
http://matplotlib.org/users/recipes.html
http://stackoverflow.com/questions/1574088/plotting-time-in-python-with-matplotlib
http://www.loria.fr/~rougier/teaching/matplotlib/#simple-plot
http://matplotlib.org/users/pyplot_tutorial.html
http://matplotlib.org/examples/index.html
'''
pass
###########################################################################
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
aurora95/Keras-FCN | models.py | 1 | 17472 | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
from keras_contrib.applications import densenet
from keras.models import Model
from keras.regularizers import l2
from keras.layers import *
from keras.engine import Layer
from keras.applications.vgg16 import *
from keras.models import *
from keras.applications.imagenet_utils import _obtain_input_shape
import keras.backend as K
import tensorflow as tf
from utils.get_weights_path import *
from utils.basics import *
from utils.resnet_helpers import *
from utils.BilinearUpSampling import *
def top(x, input_shape, classes, activation, weight_decay):
x = Conv2D(classes, (1, 1), activation='linear',
padding='same', kernel_regularizer=l2(weight_decay),
use_bias=False)(x)
if K.image_data_format() == 'channels_first':
channel, row, col = input_shape
else:
row, col, channel = input_shape
# TODO(ahundt) this is modified for the sigmoid case! also use loss_shape
if activation == 'sigmoid':
x = Reshape((row * col * classes,))(x)
return x
def FCN_Vgg16_32s(input_shape=None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
# Convolutional layers transfered from fully-connected layers
x = Conv2D(4096, (7, 7), activation='relu', padding='same', name='fc1', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same', name='fc2', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(size=(32, 32))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def AtrousFCN_Vgg16_16s(input_shape=None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', kernel_regularizer=l2(weight_decay))(x)
# Convolutional layers transfered from fully-connected layers
x = Conv2D(4096, (7, 7), activation='relu', padding='same', dilation_rate=(2, 2),
name='fc1', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same', name='fc2', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(target_size=tuple(image_size))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def FCN_Resnet50_32s(input_shape = None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
bn_axis = 3
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(size=(32, 32))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def AtrousFCN_Resnet50_16s(input_shape = None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
bn_axis = 3
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', weight_decay=weight_decay, strides=(1, 1), batch_momentum=batch_momentum)(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = atrous_conv_block(3, [512, 512, 2048], stage=5, block='a', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='b', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='c', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
#classifying layer
#x = Conv2D(classes, (3, 3), dilation_rate=(2, 2), kernel_initializer='normal', activation='linear', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(target_size=tuple(image_size))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def Atrous_DenseNet(input_shape=None, weight_decay=1E-4,
batch_momentum=0.9, batch_shape=None, classes=21,
include_top=False, activation='sigmoid'):
# TODO(ahundt) pass the parameters but use defaults for now
if include_top is True:
# TODO(ahundt) Softmax is pre-applied, so need different train, inference, evaluate.
# TODO(ahundt) for multi-label try per class sigmoid top as follows:
# x = Reshape((row * col * classes))(x)
# x = Activation('sigmoid')(x)
# x = Reshape((row, col, classes))(x)
return densenet.DenseNet(depth=None, nb_dense_block=3, growth_rate=32,
nb_filter=-1, nb_layers_per_block=[6, 12, 24, 16],
bottleneck=True, reduction=0.5, dropout_rate=0.2,
weight_decay=1E-4,
include_top=True, top='segmentation',
weights=None, input_tensor=None,
input_shape=input_shape,
classes=classes, transition_dilation_rate=2,
transition_kernel_size=(1, 1),
transition_pooling=None)
# if batch_shape:
# img_input = Input(batch_shape=batch_shape)
# image_size = batch_shape[1:3]
# else:
# img_input = Input(shape=input_shape)
# image_size = input_shape[0:2]
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=16,
data_format=K.image_data_format(),
include_top=False)
img_input = Input(shape=input_shape)
x = densenet.__create_dense_net(classes, img_input,
depth=None, nb_dense_block=3, growth_rate=32,
nb_filter=-1, nb_layers_per_block=[6, 12, 24, 16],
bottleneck=True, reduction=0.5, dropout_rate=0.2,
weight_decay=1E-4, top='segmentation',
input_shape=input_shape,
transition_dilation_rate=2,
transition_kernel_size=(1, 1),
transition_pooling=None,
include_top=include_top)
x = top(x, input_shape, classes, activation, weight_decay)
model = Model(img_input, x, name='Atrous_DenseNet')
# TODO(ahundt) add weight loading
return model
def DenseNet_FCN(input_shape=None, weight_decay=1E-4,
batch_momentum=0.9, batch_shape=None, classes=21,
include_top=False, activation='sigmoid'):
if include_top is True:
# TODO(ahundt) Softmax is pre-applied, so need different train, inference, evaluate.
# TODO(ahundt) for multi-label try per class sigmoid top as follows:
# x = Reshape((row * col * classes))(x)
# x = Activation('sigmoid')(x)
# x = Reshape((row, col, classes))(x)
return densenet.DenseNetFCN(input_shape=input_shape,
weights=None, classes=classes,
nb_layers_per_block=[4, 5, 7, 10, 12, 15],
growth_rate=16,
dropout_rate=0.2)
# if batch_shape:
# img_input = Input(batch_shape=batch_shape)
# image_size = batch_shape[1:3]
# else:
# img_input = Input(shape=input_shape)
# image_size = input_shape[0:2]
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=16,
data_format=K.image_data_format(),
include_top=False)
img_input = Input(shape=input_shape)
x = densenet.__create_fcn_dense_net(classes, img_input,
input_shape=input_shape,
nb_layers_per_block=[4, 5, 7, 10, 12, 15],
growth_rate=16,
dropout_rate=0.2,
include_top=include_top)
x = top(x, input_shape, classes, activation, weight_decay)
# TODO(ahundt) add weight loading
model = Model(img_input, x, name='DenseNet_FCN')
return model
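# Example usage (a rough sketch; the input shape and class count below are
# illustrative, and the pretrained weight files referenced above must already
# be present locally for the load_weights() calls to succeed):
#
#   model = AtrousFCN_Resnet50_16s(input_shape=(320, 320, 3), classes=21)
#   model.summary()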
| mit |
shiqiangli/tushare | tushare/stock/trading.py | 14 | 21568 | # -*- coding:utf-8 -*-
"""
Trading data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
Get historical trading records for an individual stock
Parameters
------
code:string
stock code e.g. 600848
start:string
start date, format:YYYY-MM-DD; if empty, data starts from the earliest date provided by the API
end:string
end date, format:YYYY-MM-DD; if empty, data runs up to the most recent trading day
ktype:string
data type: D=daily K-line, W=weekly, M=monthly, 5=5-minute, 15=15-minute, 30=30-minute, 60=60-minute; defaults to D
retry_count : int, default 3
number of times to retry if network problems occur
pause : int, default 0
number of seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
return
-------
DataFrame
columns: date, open, high, close, low, volume, price change, percent change, 5-day average price, 10-day average price, 20-day average price, 5-day average volume, 10-day average volume, 20-day average volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
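# Illustrative usage of get_hist_data (requires network access; the codes and
# dates below are only examples):
#   df = get_hist_data('600848')                               # daily bars
#   df = get_hist_data('600848', start='2015-01-05', end='2015-01-09')
#   df = get_hist_data('600848', ktype='W')                    # weekly bars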
def _parsing_dayprice_json(pageNum=1):
"""
Parse one page of the current day's quote data, in json format
Parameters
------
pageNum: page number
return
-------
DataFrame trading data of all stocks for the current day (DataFrame)
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
Get tick-by-tick trade data
Parameters
------
code:string
stock code e.g. 600848
date:string
date, format:YYYY-MM-DD
retry_count : int, default 3
number of times to retry if network problems occur
pause : int, default 0
number of seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
return
-------
DataFrame tick data for the given stock and date (DataFrame)
columns: trade time, trade price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
Get tick-by-tick detail data for the current day
Parameters
------
code:string
stock code e.g. 600848
retry_count : int, default 3
number of times to retry if network problems occur
pause : int, default 0
number of seconds to pause between repeated requests, to avoid problems caused by requests being sent too close together
return
-------
DataFrame tick data for the given stock for the current day (DataFrame)
columns: trade time, trade price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
return data
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
    Get trading data of all stocks for the most recent trading day in one call
    return
    -------
    DataFrame
           columns: code, name, percent change, current price, open, high, low,
           previous close, volume, turnover rate
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
    Get real-time quotes data,
    useful for tracking trading activity (result of this call vs. the previous call)
    Parameters
    ------
    symbols : string, array-like object (list, tuple, Series).
    return
    -------
    DataFrame of real-time quotes
              columns: 0: name, stock name
            1: open, today's opening price
            2: pre_close, previous close
            3: price, current price
            4: high, today's high
            5: low, today's low
            6: bid, best bid price ("buy one")
            7: ask, best ask price ("sell one")
            8: volume, traded volume (you may need to divide it by 100)
            9: amount, traded amount (CNY)
            10: b1_v, bid-1 volume (lots)
            11: b1_p, bid-1 price
            12: b2_v, bid-2 volume
            13: b2_p, bid-2 price
            14: b3_v, bid-3 volume
            15: b3_p, bid-3 price
            16: b4_v, bid-4 volume
            17: b4_p, bid-4 price
            18: b5_v, bid-5 volume
            19: b5_p, bid-5 price
            20: a1_v, ask-1 volume (lots)
            21: a1_p, ask-1 price
            ...
            30: date, date
            31: time, time
"""
symbols_list = ''
    if isinstance(symbols, (list, set, tuple, pd.Series)):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
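# Editor's note: an illustrative sketch, not part of the original module,
# showing how get_realtime_quotes is typically called for several symbols at
# once. The codes are placeholders, the column names follow the docstring
# above, and the helper is never called.
def _example_get_realtime_quotes():
    df = get_realtime_quotes(['600848', '000001'])
    if df is not None:
        # last price and best bid/ask per symbol
        print(df[['code', 'name', 'price', 'bid', 'ask']])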
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001):
'''
    Get historical price data adjusted for corporate actions
    Parameters
    ------
      code:string
          stock code, e.g. 600848
      start:string
          start date, format: YYYY-MM-DD; defaults to one year ago today when None
      end:string
          end date, format: YYYY-MM-DD; defaults to today when None
      autype:string
          adjustment type: qfq - forward adjusted, hfq - backward adjusted,
          None - unadjusted; default is qfq
      retry_count : int, default 3
        number of retries when network problems occur
      pause : int, default 0
        seconds to pause between retries, to avoid problems caused by sending
        requests too frequently
    return
    -------
      DataFrame
          date    trade date (index)
          open    opening price
          high    highest price
          close   closing price
          low     lowest price
          volume  traded volume
          amount  traded amount
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending=False)
data = data.astype(float)
return data
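# Editor's note: a small self-contained sketch, not part of the original
# module, of the forward-adjustment ('qfq') arithmetic used above. The latest
# adjustment factor divided by the latest (pre-)close gives ``rate``, and each
# historical price is divided by that rate so the newest bar lines up with the
# current quote. The numbers below are invented.
def _example_qfq_adjustment():
    factor = 2.0        # latest cumulative adjustment factor (frow['factor'])
    pre_close = 10.0    # latest close/pre_close taken from the realtime quote
    rate = factor / pre_close            # 0.2, as computed in get_h_data
    raw_close = 1.8                      # a historical, factor-scaled close
    adjusted_close = raw_close / rate    # 9.0, comparable with today's price
    return adjusted_close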
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
    Get quotes of the major market indices
    return
    -------
    DataFrame
        code: index code
        name: index name
        change: percent change
        open: opening price
        preclose: previous close
        close: closing price
        high: highest price
        low: lowest price
        volume: traded volume (lots)
        amount: traded amount (100 million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
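# Editor's note: illustrative usage of get_index, not part of the original
# module. The column names follow the docstring above and the sort call uses
# the same pandas API as the rest of this module; the helper is never called.
def _example_get_index():
    df = get_index()
    # indices ranked by percent change, strongest first
    print(df.sort('change', ascending=False)[['code', 'name', 'change']])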
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical quotes for a batch of symbols; see the get_hist_data
    interface for parameter details and the returned data format
"""
df = pd.DataFrame()
    if isinstance(symbols, (list, set, tuple, pd.Series)):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
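# Editor's note: illustrative usage of get_hists, not part of the original
# module. The symbols and dates are placeholders; the helper is never called.
def _example_get_hists():
    df = get_hists(['600848', '000001'], start='2014-01-01',
                   end='2014-06-30', ktype='D')
    if df is not None:
        # the added 'code' column tells the two symbols' rows apart
        print(df.groupby('code').size())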
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
    Build the exchange-prefixed symbol (sh/sz) for a stock code
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6'] else 'sz%s'%code
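# Editor's note, illustrating the mapping implemented above (not part of the
# original module): 6-digit codes starting with '5' or '6' get the Shanghai
# prefix, all other 6-digit codes the Shenzhen prefix, and index labels are
# looked up in ct.INDEX_LIST.
#     _code_to_symbol('600848') -> 'sh600848'
#     _code_to_symbol('000001') -> 'sz000001'
#     _code_to_symbol('12345')  -> ''          # not a 6-digit code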
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
massmutual/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 47 | 8566 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, nmf.NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = nmf.NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = nmf.NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = nmf.NMF(solver=solver, n_components=4, init='nndsvd',
random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0,
tol=tol).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = nmf.NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = nmf.NMF(solver=solver, random_state=0, tol=1e-4,
n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = nmf.non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = nmf.non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = nmf.NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = nmf.non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Workday/OpenFrame | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
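# Editor's note (illustrative, not part of the original script): for the
# sample .txt contents shown in the comment above, ReadDumpTxtFile returns
#   {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader',
#    'rept': 'crash svc'}
# which is why Main() can simply inspect dump_info['ptype'] further down.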
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
lin-credible/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
sjpet/epysteme | setup.py | 1 | 1170 | try:
from setuptools import setup
except ImportError:
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
from epysteme import __version__
setup(name='epysteme',
packages=['epysteme'],
version=__version__,
description='A neat machine learning package for python',
license='mit',
author='Stefan Peterson',
author_email='stefan.peterson@rubico.com',
url='https://github.com/sjpet/epysteme',
download_url='https://github.com/sjpet/epysteme/tarball/%s' % __version__,
keywords='machine learning data mining out-of-memory',
classifiers=['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3'],
install_requires=['pandas', 'numpy', 'seaborn', 'matplotlib'], #, 'scikit-learn'],
extras_require={'MSSQL': ['pyodbc']},
tests_require=['pytest', 'pytest-mysql'])
| mit |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py | 12 | 7174 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
points : (Npoints, Ndims) ndarray of floats
Data point coordinates.
values : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
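# Editor's note: a short usage sketch for NearestNDInterpolator, added for
# illustration and not part of the original scipy source. The data values are
# invented.
#
#     >>> import numpy as np
#     >>> points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     >>> values = np.array([1.0, 2.0, 3.0])
#     >>> interp = NearestNDInterpolator(points, values)
#     >>> interp(np.array([[0.1, 0.1], [0.9, 0.1]]))
#     array([ 1.,  2.])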
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| mit |
petewarden/tensorflow_makefile | tensorflow/contrib/learn/python/learn/tests/test_io.py | 8 | 4006 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.io import *
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
class IOTest(tf.test.TestCase):
def test_pandas_dataframe(self):
if HAS_PANDAS:
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(data, labels)
score = accuracy_score(labels[0], classifier.predict(data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(data, labels)
score = accuracy_score(labels, classifier.predict(data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
# test dask.dataframe
df = pd.DataFrame(
dict(a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(data, labels)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rahul-c1/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
chrischoy/3D-R2N2 | lib/test_net.py | 1 | 2929 | import os
import numpy as np
import scipy.io as sio
import inspect
import sklearn.metrics
from multiprocessing import Queue
# Theano & network
from models import load_model
from lib.config import cfg
from lib.solver import Solver
from lib.data_io import category_model_id_pair
from lib.data_process import make_data_processes, get_while_running
from lib.voxel import evaluate_voxel_prediction
def test_net():
''' Evaluate the network '''
# Make result directory and the result file.
result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
result_fn = os.path.join(result_dir, 'result.mat')
print("Exp file will be written to: " + result_fn)
# Make a network and load weights
NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
print('Network definition: \n')
print(inspect.getsource(NetworkClass.network_definition))
net = NetworkClass(compute_grad=False)
net.load(cfg.CONST.WEIGHTS)
solver = Solver(net)
# set constants
batch_size = cfg.CONST.BATCH_SIZE
# set up testing data process. We make only one prefetching process. The
# process will return one batch at a time.
queue = Queue(cfg.QUEUE_SIZE)
data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
processes = make_data_processes(queue, data_pair, 1, repeat=False, train=False)
num_data = len(processes[0].data_paths)
num_batch = int(num_data / batch_size)
# prepare result container
results = {'cost': np.zeros(num_batch),
'mAP': np.zeros((num_batch, batch_size))}
# Save results for various thresholds
for thresh in cfg.TEST.VOXEL_THRESH:
results[str(thresh)] = np.zeros((num_batch, batch_size, 5))
# Get all test data
batch_idx = 0
for batch_img, batch_voxel in get_while_running(processes[0], queue):
if batch_idx == num_batch:
break
pred, loss, activations = solver.test_output(batch_img, batch_voxel)
for j in range(batch_size):
# Save IoU per thresh
for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
r = evaluate_voxel_prediction(pred[j, ...], batch_voxel[j, ...], thresh)
results[str(thresh)][batch_idx, j, :] = r
# Compute AP
precision = sklearn.metrics.average_precision_score(
batch_voxel[j, :, 1].flatten(), pred[j, :, 1].flatten())
results['mAP'][batch_idx, j] = precision
# record result for the batch
results['cost'][batch_idx] = float(loss)
print('%d/%d, costs: %f, mAP: %f' %
(batch_idx, num_batch, loss, np.mean(results['mAP'][batch_idx])))
batch_idx += 1
print('Total loss: %f' % np.mean(results['cost']))
print('Total mAP: %f' % np.mean(results['mAP']))
sio.savemat(result_fn, results)
| mit |
MonoCloud/zipline | zipline/utils/tradingcalendar.py | 9 | 11195 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.datetools.relativedelta(years=1)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, then the 24th, a Friday, is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
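# Editor's note: an illustrative check, not part of the original module, of
# how the ``trading_day`` CDay offset behaves around a holiday. July 4, 2012
# fell on a Wednesday and is in ``non_trading_days``, so stepping one trading
# day forward from the prior session skips straight over it:
#
#     >>> pd.Timestamp('2012-07-03', tz='UTC') + trading_day
#     Timestamp('2012-07-05 00:00:00+0000', tz='UTC')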
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
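# Illustrative sketch (not part of the original calendar module): one way the
# structures built above could be queried for a single session. The helper and
# its argument name are hypothetical; `session` only needs to be a member of
# the `trading_days` index.
def example_session_bounds(session):
    """Return (is_early_close, market_open, market_close) for a trading day."""
    row = open_and_closes.loc[session]
    return session in early_closes, row['market_open'], row['market_close']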
| apache-2.0 |
euri10/zipline | zipline/transforms/batch_transform.py | 20 | 17747 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generator versions of transforms.
"""
import functools
import logbook
import numpy
from numbers import Integral
import pandas as pd
from six import (
string_types,
itervalues,
iteritems
)
from zipline.utils.data import MutableIndexRollingPanel
from zipline.protocol import Event
from zipline.finance import trading
log = logbook.Logger('BatchTransform')
func_map = {'open_price': 'first',
'close_price': 'last',
'low': 'min',
'high': 'max',
'volume': 'sum'
}
def get_sample_func(item):
if item in func_map:
return func_map[item]
else:
return 'last'
def downsample_panel(minute_rp, daily_rp, mkt_close):
"""
@minute_rp is a rolling panel, which should have minutely rows
@daily_rp is a rolling panel, which should have daily rows
    @mkt_close is the market close timestamp used to label the new frame in daily_rp
Using the history in minute_rp, a new daily bar is created by
downsampling. The data from the daily bar is then added to the
daily rolling panel using add_frame.
"""
cur_panel = minute_rp.get_current()
sids = minute_rp.minor_axis
day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)
dt1 = trading.environment.normalize_date(mkt_close)
dt2 = trading.environment.next_trading_day(mkt_close)
by_close = functools.partial(get_date, mkt_close, dt1, dt2)
for item in minute_rp.items:
frame = cur_panel[item]
func = get_sample_func(item)
# group by trading day, using the market close of the current
# day. If events occurred after the last close (yesterday) but
# before today's close, group them into today.
dframe = frame.groupby(lambda d: by_close(d)).agg(func)
for stock in sids:
day_frame[stock][item] = dframe[stock].ix[dt1]
# store the frame at midnight instead of the close
daily_rp.add_frame(dt1, day_frame)
def get_date(mkt_close, d1, d2, d):
if d > mkt_close:
return d2
else:
return d1
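# Illustrative sketch (not part of zipline): the heart of downsample_panel() is
# a groupby-aggregate keyed on the market close, using the per-field sample
# functions from func_map above. The toy minute frame, its field names and the
# normalize/next-day stand-ins below are hypothetical.
def _downsample_example():
    index = pd.date_range('2013-01-02 14:31', periods=390, freq='T', tz='UTC')
    minute_frame = pd.DataFrame({'close_price': range(390),
                                 'volume': [100] * 390}, index=index)
    mkt_close = index[-1]
    dt1 = mkt_close.normalize()        # stand-in for environment.normalize_date
    dt2 = dt1 + pd.DateOffset(days=1)  # stand-in for the next trading day
    by_close = functools.partial(get_date, mkt_close, dt1, dt2)
    grouped = minute_frame.groupby(by_close)
    # 'close_price' -> 'last', 'volume' -> 'sum', mirroring func_map above
    return grouped.agg({'close_price': get_sample_func('close_price'),
                        'volume': get_sample_func('volume')})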
class InvalidWindowLength(Exception):
"""
Error raised when the window length is unusable.
"""
pass
def check_window_length(window_length):
"""
Ensure the window length provided to a transform is valid.
"""
if window_length is None:
raise InvalidWindowLength("window_length must be provided")
if not isinstance(window_length, Integral):
raise InvalidWindowLength(
"window_length must be an integer-like number")
if window_length == 0:
raise InvalidWindowLength("window_length must be non-zero")
if window_length < 0:
raise InvalidWindowLength("window_length must be positive")
class BatchTransform(object):
"""Base class for batch transforms with a trailing window of
variable length. As opposed to pure EventWindows that get a stream
    of events and are bound to a single SID, this class creates a stream
    of pandas DataFrames with each column representing a sid.
There are two ways to create a new batch window:
(i) Inherit from BatchTransform and overload get_value(data).
E.g.:
```
class MyBatchTransform(BatchTransform):
def get_value(self, data):
# compute difference between the means of sid 0 and sid 1
return data[0].mean() - data[1].mean()
```
(ii) Use the batch_transform decorator.
E.g.:
```
@batch_transform
def my_batch_transform(data):
return data[0].mean() - data[1].mean()
```
In your algorithm you would then have to instantiate
this in the initialize() method:
```
self.my_batch_transform = MyBatchTransform()
```
To then use it, inside of the algorithm handle_data(), call the
handle_data() of the BatchTransform and pass it the current event:
```
result = self.my_batch_transform(data)
```
"""
def __init__(self,
func=None,
refresh_period=0,
window_length=None,
clean_nans=True,
sids=None,
fields=None,
compute_only_full=True,
bars='daily',
downsample=False):
"""Instantiate new batch_transform object.
:Arguments:
func : python function <optional>
If supplied will be called after each refresh_period
with the data panel and all args and kwargs supplied
to the handle_data() call.
refresh_period : int
Interval to wait between advances in the window.
window_length : int
How many days the trailing window should have.
clean_nans : bool <default=True>
Whether to (forward) fill in nans.
sids : list <optional>
Which sids to include in the moving window. If not
supplied sids will be extracted from incoming
events.
fields : list <optional>
Which fields to include in the moving window
(e.g. 'price'). If not supplied, fields will be
extracted from incoming events.
compute_only_full : bool <default=True>
Only call the user-defined function once the window is
full. Returns None if window is not full yet.
downsample : bool <default=False>
If true, downsample bars to daily bars. Otherwise, do nothing.
"""
if func is not None:
self.compute_transform_value = func
else:
self.compute_transform_value = self.get_value
self.clean_nans = clean_nans
self.compute_only_full = compute_only_full
# no need to down sample if the bars are already daily
self.downsample = downsample and (bars == 'minute')
# How many bars are in a day
self.bars = bars
if self.bars == 'daily':
self.bars_in_day = 1
elif self.bars == 'minute':
self.bars_in_day = int(6.5 * 60)
else:
raise ValueError('%s bars not understood.' % self.bars)
# The following logic is to allow pre-specified sid filters
# to operate on the data, but to also allow new symbols to
# enter the batch transform's window IFF a sid filter is not
# specified.
if sids is not None:
if isinstance(sids, (string_types, Integral)):
self.static_sids = set([sids])
else:
self.static_sids = set(sids)
else:
self.static_sids = None
self.initial_field_names = fields
if isinstance(self.initial_field_names, string_types):
self.initial_field_names = [self.initial_field_names]
self.field_names = set()
self.refresh_period = refresh_period
check_window_length(window_length)
self.window_length = window_length
self.trading_days_total = 0
self.window = None
self.full = False
# Set to -inf essentially to cause update on first attempt.
self.last_dt = pd.Timestamp('1900-1-1', tz='UTC')
self.updated = False
self.cached = None
self.last_args = None
self.last_kwargs = None
# Data panel that provides bar information to fill in the window,
# when no bar ticks are available from the data source generator
# Used in universes that 'rollover', e.g. one that has a different
# set of stocks per quarter
self.supplemental_data = None
self.rolling_panel = None
self.daily_rolling_panel = None
def handle_data(self, data, *args, **kwargs):
"""
Point of entry. Process an event frame.
"""
# extract dates
dts = [event.dt for event in itervalues(data._data)]
# we have to provide the event with a dt. This is only for
# checking if the event is outside the window or not so a
# couple of seconds shouldn't matter. We don't add it to
# the data parameter, because it would mix dt with the
# sid keys.
event = Event()
event.dt = max(dts)
event.data = {k: v.__dict__ for k, v in iteritems(data._data)
# Need to check if data has a 'length' to filter
# out sids without trade data available.
# TODO: expose more of 'no trade available'
# functionality to zipline
if len(v)}
# only modify the trailing window if this is
# a new event. This is intended to make handle_data
# idempotent.
if self.last_dt < event.dt:
self.updated = True
self._append_to_window(event)
else:
self.updated = False
# return newly computed or cached value
return self.get_transform_value(*args, **kwargs)
def _init_panels(self, sids):
if self.downsample:
self.rolling_panel = MutableIndexRollingPanel(
self.bars_in_day,
self.field_names,
sids,
)
self.daily_rolling_panel = MutableIndexRollingPanel(
self.window_length,
self.field_names,
sids,
)
else:
self.rolling_panel = MutableIndexRollingPanel(
self.window_length * self.bars_in_day,
self.field_names,
sids,
)
def _append_to_window(self, event):
self.field_names = self._get_field_names(event)
if self.static_sids is None:
sids = set(event.data.keys())
else:
sids = self.static_sids
# the panel sent to the transform code will have
# columns masked with this set of sids. This is how
        # we guarantee that the sids sent to the algorithm's
        # handle_data are exactly the ones passed on to the batch
        # transform. See the get_data method to see it applied.
# N.B. that the underlying panel grows monotonically
# if the set of sids changes over time.
self.latest_sids = sids
        # Create the rolling panel if it does not exist yet
if self.rolling_panel is None:
self._init_panels(sids)
# Store event in rolling frame
self.rolling_panel.add_frame(event.dt,
pd.DataFrame(event.data,
index=self.field_names,
columns=sids))
# update trading day counters
        # we may get events from non-trading sources which occur on
# non-trading days. The book-keeping for market close and
# trading day counting should only consider trading days.
if trading.environment.is_trading_day(event.dt):
_, mkt_close = trading.environment.get_open_and_close(event.dt)
if self.bars == 'daily':
# Daily bars have their dt set to midnight.
mkt_close = trading.environment.normalize_date(mkt_close)
if event.dt == mkt_close:
if self.downsample:
downsample_panel(self.rolling_panel,
self.daily_rolling_panel,
mkt_close
)
self.trading_days_total += 1
self.mkt_close = mkt_close
self.last_dt = event.dt
if self.trading_days_total >= self.window_length:
self.full = True
def get_transform_value(self, *args, **kwargs):
"""Call user-defined batch-transform function passing all
arguments.
Note that this will only call the transform if the datapanel
        has actually been updated. Otherwise, the previously cached
value will be returned.
"""
if self.compute_only_full and not self.full:
return None
#################################################
# Determine whether we should call the transform
# 0. Support historical/legacy usage of '0' signaling,
# 'update on every bar'
if self.refresh_period == 0:
period_signals_update = True
else:
# 1. Is the refresh period over?
period_signals_update = (
self.trading_days_total % self.refresh_period == 0)
# 2. Have the args or kwargs been changed since last time?
args_updated = args != self.last_args or kwargs != self.last_kwargs
# 3. Is this a downsampled batch, and is the last event mkt close?
downsample_ready = not self.downsample or \
self.last_dt == self.mkt_close
recalculate_needed = downsample_ready and \
(args_updated or (period_signals_update and self.updated))
###################################################
if recalculate_needed:
self.cached = self.compute_transform_value(
self.get_data(),
*args,
**kwargs
)
self.last_args = args
self.last_kwargs = kwargs
return self.cached
def get_data(self):
"""Create a pandas.Panel (i.e. 3d DataFrame) from the
events in the current window.
Returns:
The resulting panel looks like this:
index : field_name (e.g. price)
major axis/rows : dt
            minor axis/columns : sid
"""
if self.downsample:
data = self.daily_rolling_panel.get_current()
else:
data = self.rolling_panel.get_current()
if self.supplemental_data is not None:
for item in data.items:
if item not in self.supplemental_data.items:
continue
for dt in data.major_axis:
try:
supplemental_for_dt = self.supplemental_data.ix[
item, dt, :]
except KeyError:
# Only filling in data available in supplemental data.
supplemental_for_dt = None
if supplemental_for_dt is not None:
data[item].ix[dt] = \
supplemental_for_dt.combine_first(
data[item].ix[dt])
# screen out sids no longer in the multiverse
data = data.ix[:, :, self.latest_sids]
if self.clean_nans:
# Fills in gaps of missing data during transform
# of multiple stocks. E.g. we may be missing
# minute data because of illiquidity of one stock
data = data.fillna(method='ffill')
# Hold on to a reference to the data,
# so that it's easier to find the current data when stepping
# through with a debugger
self._curr_data = data
return data
def get_value(self, *args, **kwargs):
raise NotImplementedError(
"Either overwrite get_value or provide a func argument.")
def __call__(self, f):
self.compute_transform_value = f
return self.handle_data
def _extract_field_names(self, event):
# extract field names from sids (price, volume etc), make sure
# every sid has the same fields.
sid_keys = []
for sid in itervalues(event.data):
keys = set([name for name, value in sid.items()
if isinstance(value,
(int,
float,
numpy.integer,
numpy.float,
numpy.long))
])
sid_keys.append(keys)
# with CUSTOM data events, there may be different fields
# per sid. So the allowable keys are the union of all events.
union = set.union(*sid_keys)
unwanted_fields = {
'portfolio',
'sid',
'dt',
'type',
'source_id',
'_initial_len',
}
return union - unwanted_fields
def _get_field_names(self, event):
if self.initial_field_names is not None:
return self.initial_field_names
else:
self.latest_names = self._extract_field_names(event)
return set.union(self.field_names, self.latest_names)
def batch_transform(func):
"""Decorator function to use instead of inheriting from BatchTransform.
For an example on how to use this, see the doc string of BatchTransform.
"""
@functools.wraps(func)
def create_window(*args, **kwargs):
# passes the user defined function to BatchTransform which it
# will call instead of self.get_value()
return BatchTransform(*args, func=func, **kwargs)
return create_window
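# Illustrative usage sketch (not part of zipline): a transform built with the
# decorator above. The 'price' and 'volume' field names and the surrounding
# algorithm methods are hypothetical; BatchTransform.handle_data() is the entry
# point, as documented in the class above.
@batch_transform
def rolling_vwap(datapanel):
    # datapanel is the pandas.Panel returned by BatchTransform.get_data()
    prices = datapanel['price']
    volumes = datapanel['volume']
    return (prices * volumes).sum() / volumes.sum()  # one VWAP per sid
# In an algorithm one would typically write, in initialize():
#     self.vwap = rolling_vwap(refresh_period=1, window_length=5)
# and in handle_data(data):
#     vwap = self.vwap.handle_data(data)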
| apache-2.0 |
RTS2/rts2 | scripts/u_point/u_point/quick_analysis.py | 3 | 7630 | #!/usr/bin/env python3
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
'''
Make decision about exposure
'''
__author__ = 'wildi.markus@bluewin.ch'
import re
from astropy.io import fits
import u_point.sextractor_3 as sextractor_3
#import sextractor_3 as sextractor_3
# Background: 4085.92 RMS: 241.384 / Threshold: 651.737
pat = re.compile('.*?Background:[ ]+([0-9.]+)[ ]+RMS:[ ]+([0-9.]+)[ /]+Threshold:[ ]+([0-9.]+).*?')
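# Illustrative sketch (not part of the original module): how the pattern above
# splits the sample sextractor stderr line quoted in the comment into its three
# numeric fields (background, RMS, detection threshold).
def _example_parse_background(line='Background: 4085.92 RMS: 241.384 / Threshold: 651.737'):
    m = pat.match(line)
    if m is None:
        return None
    return float(m.group(1)), float(m.group(2)), float(m.group(3))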
class QuickAnalysis(object):
def __init__(self,lg=None,ds9_display=None,background_max=None,peak_max=None,objects_min=None,exposure_interval=None,ratio_interval=None):
self.lg=lg
self.ds9_display=ds9_display
self.background_max=background_max
self.peak_max=peak_max
self.objects_min=objects_min
self.exposure_interval=exposure_interval
self.ratio_interval=ratio_interval
self.display=None
def analyze(self,nml_id=None,exposure_last=None,ptfn=None):
sx = sextractor_3.Sextractor(['EXT_NUMBER','X_IMAGE','Y_IMAGE','MAG_BEST','FLAGS','CLASS_STAR','FWHM_IMAGE','A_IMAGE','B_IMAGE','FLUX_MAX','BACKGROUND','THRESHOLD'],sexpath='/usr/bin/sextractor',sexconfig='/usr/share/sextractor/default.sex',starnnw='/usr/share/sextractor/default.nnw',verbose=True)
stdo=stde=None
try:
stdo,stde=sx.runSExtractor(filename=ptfn)
except Exception as e:
self.lg.error('quick analyze exception: {}, stde: {}'.format(e,stde))
return None,None
if len(sx.objects)==0:
self.lg.warn('quick analyze: no sextracted objects: {}'.format(ptfn))
return None,None
stde_str=stde.decode('utf-8').replace('\n','')
# Background: 4085.92 RMS: 241.384 / Threshold: 651.737
m=pat.match(stde_str)
if m is not None:
background=float(m.group(1))
background_rms=float(m.group(2))
treshold=float(m.group(3))
else:
self.lg.warn('quick analyze sextract: no background found for file: {}'.format(ptfn))
return None,None
background_f=True
if background > self.background_max:
background_f=False
self.lg.info('quick analyze sextract: background {0:6.1f} > {1:6.1f}'.format(background,self.background_max))
sx.sortObjects(sx.get_field('MAG_BEST'))
i_f = sx.get_field('FLAGS')
try:
sxobjs=[x for x in sx.objects if x[i_f]==0] # only the good ones
sxobjs_len=len(sxobjs)
self.lg.debug('quick analyze:id: {0}, number of sextract objects: {1}, fn: {2} '.format(nml_id,sxobjs_len,ptfn))
except:
self.lg.warn('quick analyze:id: {0}, no sextract result for: {1} '.format(nml_id,ptfn))
return None,None
if sxobjs_len < self.objects_min:
self.lg.debug('quick analyze sextract: found objects {0:6.1f} < {1:6.1f}'.format(len(sxobjs),self.objects_min))
i_x = sx.get_field('X_IMAGE')
i_y = sx.get_field('Y_IMAGE')
i_m = sx.get_field('FLUX_MAX')
i_h = sx.get_field('FWHM_IMAGE')
# fetch from brightest object the brightest pixel value
image_data = fits.getdata(ptfn)
# x=y=0 : lower left
# x=y=max : upper right
# matplotlib x=y=0 up, max lower right
pixel_values=list()
x_l=list()
y_l=list()
for x in range(0,image_data.shape[0]):
dx2=(x-sxobjs[0][i_y])**2
for y in range(0,image_data.shape[1]):
dy2=(y-sxobjs[0][i_x])**2
if dx2 + dy2 < 10. * sxobjs[0][i_h]:
# matplotlib!
x_l.append(y)
y_l.append(x)
pixel_values.append(image_data[x][y])
peak=max(pixel_values)
peak_f=True
if peak > self.peak_max:
peak_f=False
self.lg.info('quick analyze sextract: peak {0:6.1f} > {1:6.1f}'.format(peak,self.peak_max))
ratio= (max(pixel_values)-background)/(self.peak_max-background)
        if self.ratio_interval[0] < ratio < self.ratio_interval[1]:
redo=False
exposure_current=exposure_last
self.lg.debug('quick analyze: exposure unchanged, not redoing')
else:
redo=True
exposure_current=exposure_last / ratio
if exposure_current > self.exposure_interval[1]:
exposure_current=self.exposure_interval[1]
self.lg.warn('quick analyze: exposure exceeds maximum, limiting')
elif exposure_current < self.exposure_interval[0]:
exposure_current=self.exposure_interval[0]
                self.lg.warn('quick analyze: exposure below the minimum, limiting')
self.lg.info('quick analyze: exposure last: {0:6.3f}, current: {1:6.3f}, min: {2:6.3f}, max: {3:6.3f}, peak image: {4:6.1f}, value max: {5:6.1f}, ratio: {6:6.4f}'.format(exposure_last, exposure_current,self.exposure_interval[0],self.exposure_interval[1],peak,self.peak_max,ratio))
if False: # for debugging only
import matplotlib.pyplot as plt
plt.imshow(image_data, cmap='gray')
plt.scatter(x_l,y_l, c='yellow')
plt.show()
if self.ds9_display:
# absolute
self.display_fits(fn=ptfn, sxobjs=sxobjs,i_x=i_x,i_y=i_y)
return exposure_current,redo
def display_fits(self,fn=None,sxobjs=None,i_x=None,i_y=None):
if self.display is None:
from pyds9 import DS9
import time
try:
self.display = DS9()
time.sleep(10.)
except ValueError as e:
self.lg.info('quick analyze display_fits: ds9 died, retrying, error: {}'.format(e))
return
try:
self.display.set('file {0}'.format(fn))
self.display.set('scale zscale')
except ValueError as e:
self.display=None
self.lg.info('quick analyze display_fits: ds9 died, retrying, error: {}'.format(e))
return
for x in sxobjs:
# the brightest is first
self.display.set('regions', 'image; circle {0} {1} 10'.format(x[i_x],x[i_y]))
break
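# Illustrative sketch (not part of the original module): the exposure update
# rule used inside QuickAnalysis.analyze(), isolated as a pure function. It
# omits the early-return branch taken when the peak ratio already lies inside
# ratio_interval.
def _next_exposure(exposure_last, peak, background, peak_max, exposure_interval):
    ratio = (peak - background) / (peak_max - background)
    exposure = exposure_last / ratio
    return min(max(exposure, exposure_interval[0]), exposure_interval[1])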
if __name__ == "__main__":
import argparse
import logging,sys
parser= argparse.ArgumentParser(prog=sys.argv[0], description='Check exposure analysis')
parser.add_argument('--level', dest='level', default='DEBUG', help=': %(default)s, debug level')
parser.add_argument('--toconsole', dest='toconsole', action='store_true', default=False, help=': %(default)s, log to console')
args=parser.parse_args()
filename='/tmp/{}.log'.format(sys.argv[0].replace('.py','')) # ToDo datetime, name of the script
logformat= '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
logging.basicConfig(filename=filename, level=args.level.upper(), format= logformat)
logger=logging.getLogger()
if args.toconsole:
# http://www.mglerner.com/blog/?p=8
soh=logging.StreamHandler(sys.stdout)
soh.setLevel(args.level)
logger.addHandler(soh)
    # Note: the original call passed a base_path argument that __init__() does not
    # accept and keyword names that analyze() does not know; the ratio_interval
    # values below are illustrative only.
    qa=QuickAnalysis(lg=logger,ds9_display=True,background_max=45000.,peak_max=50000.,objects_min=100.,exposure_interval=[1.,60.],ratio_interval=[0.9,1.1])
    qa.analyze(nml_id=-1,exposure_last=10.,ptfn='dss_16_889203_m70_085265.fits')
| lgpl-3.0 |
shipci/sympy | sympy/plotting/tests/test_plot.py | 17 | 8476 | from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import warnings
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        # consume eagerly so the files are actually removed on Python 3
        for f in cls.tmp_files:
            os.remove(f)
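# Illustrative sketch (not part of sympy): the context-manager variant hinted at
# in the XXX note above. plot_and_save() would have to accept the yielded
# tmp_file callable as an argument instead of using TmpFileManager directly.
from contextlib import contextmanager

@contextmanager
def tmp_file_manager():
    files = []
    def tmp_file(name=''):
        files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
        return files[-1]
    try:
        yield tmp_file
    finally:
        for f in files:
            if os.path.exists(f):
                os.remove(f)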
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| bsd-3-clause |
Tjorriemorrie/trading | 07_reinforcement/rep-daily.py | 2 | 3554 | '''
looping, infinite & random
'''
import pandas as pd
import numpy as np
from features import FeatureFactory
import pickle
from random import random, choice
from pprint import pprint
import time
currencies = [
'AUDUSD',
'EURGBP',
'EURJPY',
'EURUSD',
'GBPJPY',
'GBPUSD',
'NZDUSD',
'USDCAD',
'USDCHF',
'USDJPY',
]
intervals = [
# '60',
'1440',
]
actions = [
'stay-out',
'enter-long',
'stay-long',
'exit-long',
'enter-short',
'stay-short',
'exit-short',
]
def loadData(currency, interval):
# print 'loading dataframe...'
df = pd.read_csv(
r'../data/' + currency.upper() + interval + '.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},
# parse_dates=[[0, 1]],
# index_col=0,
)
# print df.tail()
data = df.as_matrix()
opens = data[:, 2].astype(float)
highs = data[:, 3].astype(float)
lows = data[:, 4].astype(float)
closes = data[:, 5].astype(float)
volumes = data[:, 6].astype(int)
# print 'dataframe loaded'
return opens, highs, lows, closes, volumes
def loadThetas(currency, interval, cntFeatures):
# print 'loading thetas'
try:
with open('models/{0}_{1}.thts'.format(currency, interval), 'rb') as f:
thetas = pickle.load(f)
except IOError:
thetas = [np.random.rand(cntFeatures) for a in actions]
# pprint(thetas)
# print 'thetas loaded'
return thetas
def saveThetas(currency, interval, thetas):
# print 'saving thetas'
with open('models/{0}_{1}.thts'.format(currency, interval), 'wb') as f:
pickle.dump(thetas, f)
# print 'thetas saved'
def getActionStateValue(thetas, Fsa, a):
# pprint(Fsa)
# pprint(thetas[a])
Qsa = sum(f * t for f, t in zip(Fsa, thetas[a]))
return float(Qsa)
def getActionsAvailable(a):
# stay-out: stay-out & enter-long & enter-short
if a == 0:
return [0, 1, 4]
elif a == 1:
return [2]
elif a == 2:
return [2, 3]
elif a == 4:
return [5]
elif a == 5:
return [5, 6]
else:
raise Exception('no available actions for {0}'.format(a))
def getAction(thetas, features, a):
# exploration
actionsAvailable = getActionsAvailable(a)
aMax = None
QsaHighest = -1000
for a in actionsAvailable:
Qsa = getActionStateValue(thetas, features[a], a)
if Qsa > QsaHighest:
QsaHighest = Qsa
aMax = a
a = aMax
return a
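# Illustrative sketch (not part of the original script): an epsilon-greedy
# wrapper around getAction(). It uses the module-level `epsilon` defined below,
# which this trimmed-down daily script never applies on its own.
def getActionEpsilonGreedy(thetas, features, a):
    if random() < epsilon:
        return choice(getActionsAvailable(a))
    return getAction(thetas, features, a)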
ff = FeatureFactory()
alpha = 0.1
epsilon = 0.1
gamma = 0.9
if __name__ == '__main__':
interval = choice(intervals)
for currency in currencies:
# load data
opens, highs, lows, closes, volumes = loadData(currency, interval)
dataSize = len(closes)
# extract features
features = ff.getFeatures(opens, highs, lows, closes, volumes)
# pprint(features)
# load thetas
thetas = loadThetas(currency, interval, len(features))
# repeat to get trade
a = 0
start = len(features) - 500
for i in xrange(start, len(features)):
aa = getAction(thetas, features, a)
a = aa
# display last action
print currency, actions[a] | mit |
rrohan/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
adykstra/mne-python | tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py | 2 | 10057 | """
.. _tut-timefreq-twoway-anova:
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to one single channel of which we know
that it exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude with accounting for multiple comparisons by
performing a permutation clustering test using the ANOVA as
clustering function. The final results will then be compared to those
obtained using False Discovery Rate correction for multiple comparisons.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
epochs.pick_channels([ch_name]) # restrict example to one channel
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not forgive imbalances that
# generously (risk of type-I error).
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.
###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
decim=decim, average=False, zero_mean=zero_mean,
return_itc=False)
this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
this_power = this_tfr.data[:, 0, :, :] # we only have one channel.
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument which is a list of the number
# of factor levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
###############################################################################
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_freqs * n_times)
# so we have replications * conditions * observations:
print(data.shape)
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
#    trial A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time locked responses despite the 'induced' approach.
# For analysis of induced power at the group level, averaged TFRs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect = np.ma.masked_array(effect, [sig > .05])
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. We also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
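###############################################################################
# Illustrative sanity check (not part of the original tutorial): the clustering
# routine hands ``stat_fun`` one flattened array per condition, so the wrapper
# above has to swap the replication and condition axes before calling
# ``f_mway_rm``. The dummy arrays below only demonstrate the reshaping.
fake_args = [np.zeros((n_replications, n_freqs * n_times))
             for _ in range(n_conditions)]
assert np.swapaxes(fake_args, 1, 0).shape == (n_replications, n_conditions,
                                              n_freqs * n_times)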
pthresh = 0.001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None)
###############################################################################
# Create new stats image with only significant clusters:
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
np.invert(clusters[np.squeeze(good_clusters)]))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" cluster-level corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Now using FDR:
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" FDR corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Both cluster-level and FDR correction help get rid of the
# putatively significant spots we saw in the naive f-images.
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/nltk/probability.py | 5 | 87595 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
# Ilia Kurenkov <ilia.kurenkov@gmail.com> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals, division
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist[word.lower()] += 1
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
Counter.__init__(self, samples)
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return sum(self.values())
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
return self.r_Nr(bins)[r]
def r_Nr(self, bins=None):
"""
Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: dict
"""
_r_Nr = defaultdict(int)
for count in self.values():
_r_Nr[count] += 1
# Special case for Nr[0]:
_r_Nr[0] = bins - self.B() if bins is not None else 0
return _r_Nr
def _cumulative_frequencies(self, samples):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: list(float)
"""
cf = 0.0
for sample in samples:
cf += self[sample]
yield cf
    # Note the slightly odd nomenclature: although FreqDist stores counts,
    # freq() returns a probability (a relative frequency), like ProbDist.prob().
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self.N() == 0:
return 0
return self[sample] / self.N()
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
        returned is undefined. If no outcomes have occurred in this
        frequency distribution, a ValueError is raised.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
        :rtype: any
"""
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type title: bool
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted.
:param samples: The samples to plot (default is all samples)
:type samples: list
:param cumulative: A flag to specify whether the freqs are cumulative (default = False)
:type title: bool
"""
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
width = max(len("%s" % s) for s in samples)
width = max(width, max(len("%d" % f) for f in freqs))
for i in range(len(samples)):
print("%*s" % (width, samples[i]), end=' ')
print()
for i in range(len(samples)):
print("%*d" % (width, freqs[i]), end=' ')
print()
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
# Mathematical operatiors
def __add__(self, other):
"""
Add counts from two counters.
>>> FreqDist('abbb') + FreqDist('bcc')
FreqDist({'b': 4, 'c': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__add__(other))
def __sub__(self, other):
"""
Subtract count, but keep only results with positive counts.
>>> FreqDist('abbbc') - FreqDist('bccd')
FreqDist({'b': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__sub__(other))
def __or__(self, other):
"""
Union is the maximum of value in either of the input counters.
>>> FreqDist('abbb') | FreqDist('bcc')
FreqDist({'b': 3, 'c': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__or__(other))
def __and__(self, other):
"""
Intersection is the minimum of corresponding counts.
>>> FreqDist('abbb') & FreqDist('bcc')
FreqDist({'b': 1})
"""
return self.__class__(super(FreqDist, self).__and__(other))
def __le__(self, other):
if not isinstance(other, FreqDist):
raise_unorderable_types("<=", self, other)
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ge__ = lambda self, other: not self <= other or self == other
__lt__ = lambda self, other: self <= other and not self == other
__gt__ = lambda self, other: not self <= other
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return self.pformat()
def pprint(self, maxlen=10, stream=None):
"""
Print a string representation of this FreqDist to 'stream'
:param maxlen: The maximum number of items to print
:type maxlen: int
:param stream: The stream to print to. stdout by default
"""
print(self.pformat(maxlen=maxlen), file=stream)
def pformat(self, maxlen=10):
"""
Return a string representation of this FreqDist.
:param maxlen: The maximum number of items to display
:type maxlen: int
:rtype: string
"""
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
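# Illustrative sketch (not part of NLTK): a few FreqDist basics on a toy sample.
def _freqdist_example():
    fdist = FreqDist('abracadabra')
    assert fdist.N() == 11 and fdist.B() == 5
    assert fdist.max() == 'a' and fdist['a'] == 5
    assert abs(fdist.freq('a') - 5 / 11) < 1e-12
    assert sorted(fdist.hapaxes()) == ['c', 'd']
    return fdist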
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
return (math.log(p, 2) if p != 0 else _NINF)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
p_init = p
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, p_init-p))
return random.choice(list(self.samples()))
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
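A minimal usage sketch (illustrative; the sample set here is hypothetical):
>>> from nltk.probability import UniformProbDist
>>> pd = UniformProbDist(['a', 'b', 'c', 'd'])
>>> pd.prob('a')
0.25
>>> pd.prob('z')
0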
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
return (self._prob if sample in self._sampleset else 0)
def max(self):
return self._samples[0]
def samples(self):
return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
"""
Generates a random probability distribution whereby each sample is
assigned a probability drawn uniformly at random; the probabilities
are then normalized so that they sum to one.
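A minimal usage sketch (illustrative; the exact probabilities are random,
so only their sum is checked):
>>> from nltk.probability import RandomProbDist
>>> pd = RandomProbDist(['a', 'b', 'c'])
>>> round(sum(pd.prob(s) for s in pd.samples()), 6)
1.0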
"""
def __init__(self, samples):
if len(samples) == 0:
raise ValueError('A probability distribution must '+
'have at least one sample.')
self._probs = self.unirand(samples)
self._samples = list(self._probs.keys())
@classmethod
def unirand(cls, samples):
"""
The key function that creates a randomized initial distribution
that still sums to 1. Set as a dictionary of prob values so that
it can still be passed to MutableProbDist and called with identical
syntax to UniformProbDist
"""
samples = set(samples)
randrow = [random.random() for i in range(len(samples))]
total = sum(randrow)
for i, x in enumerate(randrow):
randrow[i] = x/total
total = sum(randrow)
if total != 1:
#this difference, if present, is so small (very close to zero) that it
#can be subtracted from any element without pushing that prob outside (0, 1)
randrow[-1] -= total - 1
return dict((s, randrow[i]) for i, s in enumerate(samples))
def prob(self, sample):
return self._probs.get(sample, 0)
def samples(self):
return self._samples
def __repr__(self):
return '<RandomUniformProbDist with %d samples>' %len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
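A minimal usage sketch (illustrative; passing raw counts and letting
``normalize=True`` scale them is an assumption of this example, not a
requirement of the class):
>>> from nltk.probability import DictionaryProbDist
>>> pd = DictionaryProbDist({'win': 3, 'lose': 1}, normalize=True)
>>> pd.prob('win')
0.75
>>> pd.prob('draw')
0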
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
self._log = log
# Normalize the distribution, if requested.
if normalize:
if len(prob_dict) == 0:
raise ValueError('A DictionaryProbDist must have at least one sample ' +
'before it can be normalized.')
if log:
value_sum = sum_logs(list(self._prob_dict.values()))
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
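A minimal usage sketch (illustrative; the tiny FreqDist is hypothetical):
>>> from nltk.probability import FreqDist, MLEProbDist
>>> fd = FreqDist('aabb')
>>> mle = MLEProbDist(fd)
>>> mle.prob('a')
0.5
>>> mle.prob('z')
0.0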
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
*(c+gamma)/(N+B*gamma)*. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
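A minimal usage sketch (illustrative; the counts, gamma and bins are hypothetical):
>>> from nltk.probability import FreqDist, LidstoneProbDist
>>> fd = FreqDist('aab')
>>> ld = LidstoneProbDist(fd, 0.5, bins=4)
>>> ld.prob('a')       # (2 + 0.5) / (3 + 4*0.5)
0.5
>>> ld.prob('z')       # (0 + 0.5) / (3 + 4*0.5)
0.1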
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None:
bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
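A minimal usage sketch (illustrative; the counts and bins are hypothetical):
>>> from nltk.probability import FreqDist, LaplaceProbDist
>>> fd = FreqDist('aab')
>>> lp = LaplaceProbDist(fd, bins=5)
>>> lp.prob('a')       # (2 + 1) / (3 + 5)
0.375
>>> lp.prob('z')       # (0 + 1) / (3 + 5)
0.125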
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
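A minimal usage sketch (illustrative; the counts and bins are hypothetical):
>>> from nltk.probability import FreqDist, ELEProbDist
>>> fd = FreqDist('aab')
>>> ele = ELEProbDist(fd, bins=2)
>>> ele.prob('a')      # (2 + 0.5) / (3 + 2*0.5)
0.625
>>> ele.prob('b')      # (1 + 0.5) / (3 + 2*0.5)
0.375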
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
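A minimal usage sketch (illustrative; the two tiny FreqDists are hypothetical):
>>> from nltk.probability import FreqDist, HeldoutProbDist
>>> base = FreqDist('aabbc')
>>> heldout = FreqDist('aabbd')
>>> hd = HeldoutProbDist(base, heldout)
>>> hd.prob('a')       # Tr[2]/(Nr[2].N) = 4 / (2 * 5)
0.4
>>> hd.prob('c')       # Tr[1]/(Nr[1].N) = 0 / (1 * 5)
0.0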
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
r_Nr = base_fdist.r_Nr(bins)
Nr = [r_Nr[r] for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distributions.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([list(fd) for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of event types observed so far. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
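A minimal usage sketch (illustrative; the counts and bins are hypothetical):
>>> from nltk.probability import FreqDist, WittenBellProbDist
>>> fd = FreqDist('aab')
>>> wb = WittenBellProbDist(fd, bins=4)
>>> wb.prob('a')       # c / (N + T) = 2 / (3 + 2)
0.4
>>> wb.prob('z')       # T / (Z * (N + T)) = 2 / (2 * (3 + 2))
0.2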
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalizing factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / (self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
return (c / (self._N + self._T) if c != 0 else self._P0)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# the WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think the count of unseen as the count
# of frequency one (see Jurafsky & Martin 2nd Edition, p101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is 1.96 greater than the standard deviation, and switch to r* if
# it is less or equal:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
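# As a toy illustration (hypothetical counts): if the training data contains
# N(1) = 10 event types seen exactly once and N(2) = 4 event types seen
# exactly twice, the adjusted count for a once-seen event is
#
#     c* = (1 + 1) * N(2) / N(1) = 2 * 4 / 10 = 0.8
#
# and the total probability mass reserved for unseen events is N(1) / N.
#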
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuring ProbDist approximates the relationship between frequency
and frequency of frequency by a straight line in log space, fitted by linear regression.
Details of Simple Good-Turing algorithm can be found in:
- "Good Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pair (xi, yi), where the xi denotes the frequency and
yi denotes the frequency of frequency, we want to minimize their
square variation. E(x) and E(y) represent the mean of xi and yi.
- slope: b = sigma((xi-E(x))(yi-E(y))) / sigma((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
SUM_TO_ONE = False
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist.B() + 1``
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr_non_zero(self):
r_Nr = self._freqdist.r_Nr()
del r_Nr[0]
return r_Nr
def _r_Nr(self):
"""
Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
nonzero = self._r_Nr_non_zero()
if not nonzero:
return [], []
return zip(*sorted(nonzero.items()))
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
# For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
i = (r[j-1] if j > 0 else 0)
k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = sum(log_r) / len(log_r)
y_mean = sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
if self._slope >= -1:
warnings.warn('SimpleGoodTuring did not find a proper best fit '
'line for smoothing probabilities of occurrences. '
'The probability estimates are likely to be '
'unreliable.')
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the smoothed estimate of the number of samples with count r.
:param r: The count whose smoothed Nr estimate should be returned.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0:
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = self._freqdist.Nr(count+1)
Er = self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
# Sanity check: the renormalized mass of the seen samples plus the
# mass reserved for unseen samples should sum to (approximately) one.
r, nr = self._r_Nr()
prob_sum = sum(nr_ * self._prob_measure(r_) * self._renormal for r_, nr_ in zip(r, nr))
prob_sum += self._prob_measure(0)
print("Probability Sum:", prob_sum)
#assert abs(prob_sum - 1.0) < 1e-6, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
A mutable probdist whose probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
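A minimal usage sketch (illustrative; the base distribution and the updated value are hypothetical):
>>> from nltk.probability import UniformProbDist, MutableProbDist
>>> base = UniformProbDist(['a', 'b'])
>>> mpd = MutableProbDist(base, ['a', 'b'])
>>> round(mpd.prob('a'), 3)
0.5
>>> mpd.update('a', 0.75, log=False)
>>> round(mpd.prob('a'), 3)
0.75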
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = array.array(str("d"), [0.0]) * len(samples)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return 0.0
return (2**(self._data[i]) if self._logs else self._data[i])
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return float('-inf')
return (self._data[i] if self._logs else math.log(self._data[i], 2))
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being a valid probability distribution; the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: whether ``prob`` is given as a (base-2) log probability
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
self._data[i] = (prob if log else math.log(prob, 2))
else:
self._data[i] = (2**(prob) if log else prob)
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
"""
Kneser-Ney estimate of a probability distribution. This is a version of
back-off that counts how likely an n-gram is provided the n-1-gram had
been seen in training. Extends the ProbDistI interface, requires a trigram
FreqDist instance to train on. Optionally, a discount value different from
the default can be specified. The default discount is set to 0.75.
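A minimal usage sketch (illustrative; the toy trigram counts are hypothetical):
>>> from nltk.probability import FreqDist, KneserNeyProbDist
>>> trigram_fd = FreqDist([('a', 'b', 'c'), ('a', 'b', 'd'), ('a', 'b', 'c')])
>>> kn = KneserNeyProbDist(trigram_fd)
>>> round(kn.prob(('a', 'b', 'c')), 4)   # (2 - 0.75) / 3
0.4167
>>> kn.prob(('x', 'y', 'z'))             # completely unseen in training
0.0
>>> kn.discount()
0.75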
"""
def __init__(self, freqdist, bins=None, discount=0.75):
"""
:param freqdist: The trigram frequency distribution upon which to base
the estimation
:type freqdist: FreqDist
:param bins: Included for compatibility with nltk.tag.hmm
:type bins: int or float
:param discount: The discount applied when retrieving counts of
trigrams
:type discount: float (preferred, but can be set to int)
"""
if not bins:
self._bins = freqdist.B()
else:
self._bins = bins
self._D = discount
# cache for probability calculation
self._cache = {}
# internal bigram and trigram frequency distributions
self._bigrams = defaultdict(int)
self._trigrams = freqdist
# helper dictionaries used to calculate probabilities
self._wordtypes_after = defaultdict(float)
self._trigrams_contain = defaultdict(float)
self._wordtypes_before = defaultdict(float)
for w0, w1, w2 in freqdist:
self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
self._wordtypes_after[(w0,w1)] += 1
self._trigrams_contain[w1] += 1
self._wordtypes_before[(w1,w2)] += 1
def prob(self, trigram):
# sample must be a triple
if len(trigram) != 3:
raise ValueError('Expected an iterable with 3 members.')
trigram = tuple(trigram)
w0, w1, w2 = trigram
if trigram in self._cache:
return self._cache[trigram]
else:
# if the sample trigram was seen during training
if trigram in self._trigrams:
prob = (self._trigrams[trigram]
- self.discount())/self._bigrams[(w0, w1)]
# else if the 'rougher' environment was seen during training
elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
aftr = self._wordtypes_after[(w0, w1)]
bfr = self._wordtypes_before[(w1, w2)]
# the probability left over from alphas
leftover_prob = ((aftr * self.discount())
/ self._bigrams[(w0, w1)])
# the beta (including normalization)
beta = bfr /(self._trigrams_contain[w1] - aftr)
prob = leftover_prob * beta
# else the sample was completely unseen during training
else:
prob = 0.0
self._cache[trigram] = prob
return prob
def discount(self):
"""
Return the value by which counts are discounted. By default set to 0.75.
:rtype: float
"""
return self._D
def set_discount(self, discount):
"""
Set the value by which counts are discounted to the value of discount.
:param discount: the new value to discount counts by
:type discount: float (preferred, but int possible)
:rtype: None
"""
self._D = discount
def samples(self):
return self._trigrams.keys()
def max(self):
return self._trigrams.max()
def __repr__(self):
'''
Return a string representation of this ProbDist
:rtype: str
'''
return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = (pdist.prob(s) for s in pdist.samples())
return -sum(p * math.log(p,2) for p in probs)
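# For example (illustrative): a fair two-way distribution carries one bit of
# information per sample, so entropy(MLEProbDist(FreqDist('abab'))) == 1.0,
# since each of the two samples has probability 0.5 and -2 * 0.5 * log2(0.5) == 1.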
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition][word] += 1
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
FreqDist({'the': 3, 'dog': 2, 'not': 1})
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond][sample] += 1
def __reduce__(self):
kv_pairs = ((cond, self[cond]) for cond in self.conditions())
return (self.__class__, (), None, None, kv_pairs)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return list(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in compat.itervalues(self))
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed. '
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to tabulate
:type samples: list
:param conditions: The conditions to tabulate (default is all)
:type conditions: list
:param cumulative: A flag to specify whether the freqs are cumulative (default = False)
:type cumulative: bool
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
width = max(len("%s" % s) for s in samples)
freqs = dict()
for c in conditions:
if cumulative:
freqs[c] = list(self[c]._cumulative_frequencies(samples))
else:
freqs[c] = [self[c][sample] for sample in samples]
width = max(width, max(len("%d" % f) for f in freqs[c]))
condition_size = max(len("%s" % c) for c in conditions)
print(' ' * condition_size, end=' ')
for s in samples:
print("%*s" % (width, s), end=' ')
print()
for c in conditions:
print("%*s" % (condition_size, c), end=' ')
for f in freqs[c]:
print("%*d" % (width, f), end=' ')
print()
# Mathematical operators
def __add__(self, other):
"""
Add counts from two ConditionalFreqDists.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] + other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count > 0:
result[cond][elem] = count
return result
def __sub__(self, other):
"""
Subtract counts, keeping only results with positive counts.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] - other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count < 0:
result[cond][elem] = 0 - count
return result
def __or__(self, other):
"""
Union is the maximum of the values in either of the input counters.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] | other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count > 0:
result[cond][elem] = count
return result
def __and__(self, other):
"""
Intersection is the minimum of corresponding counts.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] & other[cond]
if newfreqdist:
result[cond] = newfreqdist
return result
# @total_ordering doesn't work here, since the class inherits from a builtin class
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<=", self, other)
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<", self, other)
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">=", self, other)
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">", self, other)
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return list(self.keys())
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modeling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.corpus import brown
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> cpdist['passed'].max()
'VBD'
>>> cpdist['passed'].prob('VBD')
0.423...
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
self._probdist_factory = probdist_factory
self._factory_args = factory_args
self._factory_kw_args = factory_kw_args
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
def __missing__(self, key):
self[key] = self._probdist_factory(FreqDist(),
*self._factory_args,
**self._factory_kw_args)
return self[key]
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
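A minimal usage sketch (illustrative; the condition name and probabilities are hypothetical):
>>> from nltk.probability import DictionaryProbDist, DictionaryConditionalProbDist
>>> cpdist = DictionaryConditionalProbDist({'good': DictionaryProbDist({'yes': 0.7, 'no': 0.3})})
>>> cpdist['good'].prob('yes')
0.7
>>> cpdist.conditions()
['good']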
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
self.update(probdist_dict)
def __missing__(self, key):
self[key] = DictionaryProbDist()
return self[key]
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
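For example (illustrative): adding two probabilities of 0.5 in log space
yields a probability of 1:
>>> from nltk.probability import add_logs
>>> add_logs(-1.0, -1.0)     # log2(0.5 + 0.5) == log2(1) == 0
0.0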
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
def sum_logs(logs):
return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1 + numsamples) // 2) +
random.randint(0, numsamples // 2))
fdist[y] += 1
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1 + numsamples) // 2 + 1):
for y in range(0, numsamples // 2 + 1):
fdist[x+y] += 1
return MLEProbDist(fdist)
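# Worked example (added for illustration, not in the original module): for
# ``numsamples=6`` the two uniform draws above give 3 * 4 = 12 equally likely
# (x, y) pairs, so the true distribution over the sums 1..6 is
# [1, 2, 3, 3, 2, 1] / 12, i.e. the MLEProbDist returned here assigns
# p(1) = p(6) = 1/12, p(2) = p(5) = 2/12 and p(3) = p(4) = 3/12.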
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
distributions by sampling a random process that has ``numsamples``
possible samples; each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
sgt = SimpleGoodTuringProbDist(fd)
print('%18s %8s %14s' \
% ("word", "freqency", "SimpleGoodTuring"))
fd_keys_sorted=(key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
for key in fd_keys_sorted:
print('%18s %8d %14e' \
% (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| apache-2.0 |
massmutual/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroid for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class is minimized.
If the "manhattan" metric is provided, this centroid is the median;
for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in CSC
# format, since that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
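# Hedged sketch (not part of scikit-learn): the soft-thresholding step used in
# ``fit`` above, applied to a toy deviation array. Deviations whose magnitude
# falls below the threshold are zeroed and the rest shrink toward zero, which
# is what prunes uninformative features from the shrunken centroids.
def _soft_threshold_demo(deviation, shrink_threshold):
    import numpy as np
    deviation = np.asarray(deviation, dtype=float)
    signs = np.sign(deviation)
    shrunk = np.abs(deviation) - shrink_threshold
    shrunk[shrunk < 0] = 0
    return shrunk * signs
# Example: _soft_threshold_demo([-1.5, -0.2, 0.1, 2.0], 0.5)
# keeps only the large deviations: [-1.0, 0.0, 0.0, 1.5] (up to signed zeros).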
| bsd-3-clause |
PythonSanSebastian/pyper_the_bot | configs/pyper_cloud.py | 1 | 2606 |
import os
import os.path as op
DEBUG = False
DAEMON = False
BOT_NAME = "pyper"
SLACK_TOKEN = os.environ['PYPER_KEY']
ROOT_DIR = op.join(op.dirname(op.abspath(__file__)), '..')
CONFIG_DIR = ROOT_DIR
LOGFILE = op.join(CONFIG_DIR, 'pyper_the_bot.log')
IMPLANTS_DIR = op.join(ROOT_DIR, 'implants')
GOOGLE_API_KEY_FILE = op.join(IMPLANTS_DIR, 'google_api_key.json')
# voting groups
VOTING_GROUPS = {'admin': ['malemburg', 'oiertwo'],
'finance': ['malemburg', 'darya_ep'],
'sponsors': [],
'comm': [],
'support': [],
'finaid': ['malemburg', 'darya_ep'],
'md': ['alexsavio', 'malemburg', 'darya_ep'],
'program': ['alexsavio', 'alex.hendorf', 'raul.cumplido', 'barrachri'],
'web': ['barrachri', 'patrick', 'stephane.wirtel'],
'media': [],
'onsite': ['alexsavio', 'darya_ep', 'oiertwo'],
'coc': [],}
# voting chairs
VOTING_CHAIRS = {'admin': ['malemburg'],
'finance': [],
'sponsors': [],
'comm': [],
'support': [],
'finaid': [],
'md': [],
'program': ['alexsavio', 'alex.hendorf'],
'web': ['barrachri'],
'media': [],
'onsite': ['oiertwo'],
'coc': [],}
# IMPLANTS: {IMPLANT_NAME: {'file': PYFILE_PATH, 'config': {IMPLANT_CONFIG_PARAMETERS}}}
IMPLANTS = {
# # requires: pyjokes
'PyJokes': {'file': op.join(IMPLANTS_DIR, 'pyjokes_implant.py')},
'Help': {'file': op.join(IMPLANTS_DIR, 'help_implant.py')},
# requires dataset
'VotingRounds': {'file': op.join(IMPLANTS_DIR, 'vote_implant.py'),
'config': {'db_file_path': op.join(CONFIG_DIR, 'vote_rounds.db'),
'vote_groups': VOTING_GROUPS,
'vote_chairs': VOTING_CHAIRS}
},
}
# ping: {file: 'implants/ping.py'},
# hackernews: {file: 'implants/hackernews.py',
# config: ''},
# requires: gspread oauth2client docstamp pandas pyopenssl
# Sponsors: {file: implants/sponsors_implant.py,
# config: {google_api_key_file: *google_api_file,
# archiver: {file: implants/archiver_implant.py,
# config: {wd: downloads},
# allowed_users: ['alexsavio'],
# allowed_channels: ['']},
# Transitions: {file: implants/transitions_implant.py},
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 15 | 31142 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
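# Hedged example (illustrative only, values made up): for a plain 2-D feature
# matrix and a 1-D label vector with 3 classes, the helper above produces a
# [batch, n_features] input shape and a one-hot [batch, n_classes] output
# shape.
def _in_out_shape_example():
    # expected result: ([32, 5], [32, 3], 32)
    return _get_in_out_shape((100, 5), (100,), n_classes=3, batch_size=32)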
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is `dict` (or an iterable which returns dict), `n_classes` must be such
that `n_classes[key] = n_classes for y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
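# Hedged usage sketch (not part of the original module): batching a plain
# iterator of feature rows into np.matrix chunks of size 2; the values are
# made up for illustration.
def _batch_data_example():
    rows = iter([[1.0], [2.0], [3.0]])
    # yields a 2-row matrix followed by the 1-row remainder
    return list(_batch_data(rows, batch_size=2))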
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
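# Hedged sketch (illustrative only): a dense array with 5 rows and
# batch_size=2 is split into ceil(5 / 2) == 3 parts of sizes 2, 2 and 1.
def _predict_feeder_example():
    x = np.arange(10).reshape(5, 2)
    parts = setup_predict_data_feeder(x, batch_size=2)
    return [p.shape for p in parts]   # [(2, 2), (2, 2), (1, 2)]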
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
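# Hedged example (illustrative only): _access uses positional indexing for
# both numpy arrays and pandas objects, so the same call works for either.
def _access_example():
    arr = np.array([10, 20, 30])
    return _access(arr, 1), _access(arr, [0, 2])   # (20, array([10, 30]))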
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample, which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If a matrix, it is treated as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (
_check_dtype(self._y.dtype) if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
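# Hedged usage sketch (not part of the original module): wiring a DataFeeder
# around small in-memory numpy data. Shapes and values are made up; in real
# use the feed-dict function would be called inside a graph-mode training
# loop.
def _data_feeder_example():
    x = np.random.randn(8, 3).astype(np.float32)
    y = np.array([0, 1, 1, 0, 1, 0, 0, 1])
    feeder = DataFeeder(x, y, n_classes=2, batch_size=4)
    inp, out = feeder.input_builder()   # creates the tf placeholders
    feed_fn = feeder.get_feed_dict_fn()
    feed_dict = feed_fn()               # one mini-batch of 4 rows, one-hot y
    return inp, out, feed_dict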
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is customary to have these iterators rotate infinitely
over the dataset, so that the trainer side can control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set
to `None`, the iterator is assumed to return already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
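# Hedged usage sketch (not part of the original module): feeding from python
# generators, as StreamingDataFeeder expects. The data is made up; real
# generators would typically stream from disk and may cycle indefinitely.
def _streaming_feeder_example():
    def x_gen():
        for i in range(100):
            yield np.array([float(i), float(i) * 2.0])
    def y_gen():
        for i in range(100):
            yield np.array([i % 2])
    feeder = StreamingDataFeeder(x_gen(), y_gen(), n_classes=2, batch_size=16)
    return feeder.input_shape, feeder.output_shape   # ([16, 2], [16, 2])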
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| mit |
aerokappa/SantaClaus | automaticOptimization.py | 1 | 2195 | import numpy as np
import pandas as pd
import itertools
from fillBags import fillOneBag
def genUseCases ( ):
nHorse = 0
nBall = 0
nBike = 0
nTrain = 0
nCoal = 0
nBook = 0
nDoll = 0
nBlocks = 0
nGloves = 0
wHorse = 5
wBall = 2
wBike = 20
wTrain = 10
wCoal = 23
wBook = 2
wDoll = 5
wBlocks = 10
wGloves = 2
targetWeight = 50
return np.array([np.array([ nHorse, nBall, nBike, nTrain, nCoal, nBook, nDoll, nBlocks, nGloves]) \
for nCoal in np.arange(0, (targetWeight)/wCoal + 1) \
for nBike in np.arange(0, (targetWeight - nCoal*wCoal)/wBike + 1) \
for nTrain in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike)/wTrain + 1) \
for nBlocks in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain)/wBlocks + 1) \
for nHorse in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain - nBlocks*wBlocks)/wHorse + 1) \
for nDoll in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain - nBlocks*wBlocks - nHorse*wHorse)/wDoll + 1) \
for nBall in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain - nBlocks*wBlocks - nHorse*wHorse - nDoll*wDoll)/wBall + 1) \
for nBook in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain - nBlocks*wBlocks - nHorse*wHorse - nDoll*wDoll - nBall*wBall)/wBook + 1) \
for nGloves in np.arange(0, (targetWeight - nCoal*wCoal - nBike*wBike - nTrain*wTrain - nBlocks*wBlocks - nHorse*wHorse - nDoll*wDoll - nBall*wBall - nBook*wBook)/wGloves + 1) \
])
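# Hedged sketch (illustrative only): each candidate row above is meant to stay
# at or below the 50-unit target. Dotting the compositions with the per-gift
# weights (same column order as the rows) makes that easy to inspect; note the
# bounds in genUseCases rely on Python 2 integer division.
def _bag_weight_totals():
    weights = np.array([5, 2, 20, 10, 23, 2, 5, 10, 2])
    return genUseCases().dot(weights)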
def getBagWeights( bagItemCounts, giftListSummary ):
bagList = genUseCases()
nBags = len(bagList)
bagWeight_mean = np.zeros(nBags)
bagWeight_std = np.zeros(nBags)
for i, currBag in enumerate(bagList):
if (i%1000==0):
print(nBags, i)
bagWeights = fillOneBag( currBag, giftListSummary)
if np.size(bagWeights) != 1:
bagWeights[ bagWeights >= 50 ] = 0
bagWeight_mean[i] = np.mean(bagWeights)
bagWeight_std[i] = np.std(bagWeights)
return bagWeight_mean, bagWeight_std | mit |
f3r/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 104 | 2132 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
kashif/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/pandas/stats/tests/test_fama_macbeth.py | 16 | 2089 | from pandas import DataFrame, Panel
from pandas.stats.api import fama_macbeth
from .common import assert_almost_equal, BaseTest
from pandas.compat import range
from pandas import compat
import numpy as np
class TestFamaMacBeth(BaseTest):
def testFamaMacBethRolling(self):
# self.checkFamaMacBethExtended('rolling', self.panel_x, self.panel_y,
# nw_lags_beta=2)
# df = DataFrame(np.random.randn(50, 10))
x = dict((k, DataFrame(np.random.randn(50, 10))) for k in 'abcdefg')
x = Panel.from_dict(x)
y = (DataFrame(np.random.randn(50, 10)) +
DataFrame(0.01 * np.random.randn(50, 10)))
self.checkFamaMacBethExtended('rolling', x, y, nw_lags_beta=2)
self.checkFamaMacBethExtended('expanding', x, y, nw_lags_beta=2)
def checkFamaMacBethExtended(self, window_type, x, y, **kwds):
window = 25
result = fama_macbeth(y=y, x=x, window_type=window_type, window=window,
**kwds)
self._check_stuff_works(result)
index = result._index
time = len(index)
for i in range(time - window + 1):
if window_type == 'rolling':
start = index[i]
else:
start = index[0]
end = index[i + window - 1]
x2 = {}
for k, v in compat.iteritems(x):
x2[k] = v.truncate(start, end)
y2 = y.truncate(start, end)
reference = fama_macbeth(y=y2, x=x2, **kwds)
assert_almost_equal(reference._stats, result._stats[:, i])
static = fama_macbeth(y=y2, x=x2, **kwds)
self._check_stuff_works(static)
def _check_stuff_works(self, result):
# does it work?
attrs = ['mean_beta', 'std_beta', 't_stat']
for attr in attrs:
getattr(result, attr)
# does it work?
result.summary
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Cosserat/plate_circular_hole/classical_elasticity/plot.py | 5 | 1239 | import numpy as np
import matplotlib.pyplot as plt
import h5py
def h52stressStrain(h5in_filename):
h5in=h5py.File(h5in_filename,"r")
outputs_all=h5in['/Model/Elements/Gauss_Outputs'][()]
stress = outputs_all[16 , 1:-1]
strain = outputs_all[4 , 1:-1]
return [stress, strain]
[stress_load, strain_load] = h52stressStrain("vm_2shearing.h5.feioutput")
[stress_unload, strain_unload] = h52stressStrain("vm_3unloading.h5.feioutput")
[stress_reload, strain_reload] = h52stressStrain("vm_4reloading.h5.feioutput")
stress = np.concatenate((stress_load,stress_unload,stress_reload))
strain = np.concatenate((strain_load,strain_unload,strain_reload))
# plt.plot(strain, stress)
# plt.show()
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'xx-large',
'figure.figsize': (10, 8),
'axes.labelsize': 'xx-large',
'axes.titlesize':'xx-large',
'xtick.labelsize':'xx-large',
'ytick.labelsize':'xx-large'}
pylab.rcParams.update(params)
plt.plot(strain, stress, 'k', linewidth=3)
plt.xlabel('Strain [unitless]')
plt.ylabel('Stress [Pa]')
plt.title('Material Behavior: Stress-Strain')
plt.grid()
plt.box()
plt.savefig('result.pdf', transparent=True, bbox_inches='tight')
plt.show() | cc0-1.0 |
shaunstanislaus/ibis | ibis/sql/tests/test_exprs.py | 5 | 27273 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from ibis.sql.exprs import ExprTranslator
from ibis.sql.compiler import QueryContext, to_sql
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
import ibis.expr.types as ir
import ibis
class ExprSQLTest(object):
def _check_expr_cases(self, cases, context=None, named=False):
for expr, expected in cases:
repr(expr)
result = self._translate(expr, named=named, context=context)
assert result == expected
def _translate(self, expr, named=False, context=None):
translator = ExprTranslator(expr, context=context, named=named)
return translator.get_result()
class TestValueExprs(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
self.int_cols = ['a', 'b', 'c', 'd']
self.bool_cols = ['h']
self.float_cols = ['e', 'f']
def _check_literals(self, cases):
for value, expected in cases:
lit_expr = ibis.literal(value)
result = self._translate(lit_expr)
assert result == expected
def test_string_literals(self):
cases = [
('simple', "'simple'"),
('I can\'t', "'I can\\'t'"),
('An "escape"', "'An \"escape\"'")
]
for value, expected in cases:
lit_expr = ibis.literal(value)
result = self._translate(lit_expr)
assert result == expected
def test_decimal_builtins(self):
t = self.con.table('tpch_lineitem')
col = t.l_extendedprice
cases = [
(col.precision(), 'precision(l_extendedprice)'),
(col.scale(), 'scale(l_extendedprice)'),
]
self._check_expr_cases(cases)
def test_number_boolean_literals(self):
cases = [
(5, '5'),
(1.5, '1.5'),
(True, 'TRUE'),
(False, 'FALSE')
]
self._check_literals(cases)
def test_column_ref_table_aliases(self):
context = QueryContext()
table1 = ibis.table([
('key1', 'string'),
('value1', 'double')
])
table2 = ibis.table([
('key2', 'string'),
('value and2', 'double')
])
context.set_alias(table1, 't0')
context.set_alias(table2, 't1')
expr = table1['value1'] - table2['value and2']
result = self._translate(expr, context=context)
expected = 't0.value1 - t1.`value and2`'
assert result == expected
def test_column_ref_quoting(self):
schema = [('has a space', 'double')]
table = ibis.table(schema)
self._translate(table['has a space'], '`has a space`')
def test_identifier_quoting(self):
schema = [('date', 'double'), ('table', 'string')]
table = ibis.table(schema)
self._translate(table['date'], '`date`')
self._translate(table['table'], '`table`')
def test_named_expressions(self):
a, b, g = self.table.get_columns(['a', 'b', 'g'])
cases = [
(g.cast('double').name('g_dub'), 'CAST(g AS double) AS `g_dub`'),
(g.name('has a space'), 'g AS `has a space`'),
(((a - b) * a).name('expr'), '(a - b) * a AS `expr`')
]
return self._check_expr_cases(cases, named=True)
def test_binary_infix_operators(self):
# For each function, verify that the generated code is what we expect
a, b, h = self.table.get_columns(['a', 'b', 'h'])
bool_col = a > 0
cases = [
(a + b, 'a + b'),
(a - b, 'a - b'),
(a * b, 'a * b'),
(a / b, 'a / b'),
(a ** b, 'pow(a, b)'),
(a < b, 'a < b'),
(a <= b, 'a <= b'),
(a > b, 'a > b'),
(a >= b, 'a >= b'),
(a == b, 'a = b'),
(a != b, 'a != b'),
(h & bool_col, 'h AND (a > 0)'),
(h | bool_col, 'h OR (a > 0)'),
# xor is brute force
(h ^ bool_col, '(h OR (a > 0)) AND NOT (h AND (a > 0))')
]
self._check_expr_cases(cases)
def test_binary_infix_parenthesization(self):
a, b, c = self.table.get_columns(['a', 'b', 'c'])
cases = [
((a + b) + c, '(a + b) + c'),
(a.log() + c, 'ln(a) + c'),
(b + (-(a + c)), 'b + (-(a + c))')
]
self._check_expr_cases(cases)
def test_between(self):
cases = [
(self.table.f.between(0, 1), 'f BETWEEN 0 AND 1')
]
self._check_expr_cases(cases)
def test_isnull_notnull(self):
cases = [
(self.table['g'].isnull(), 'g IS NULL'),
(self.table['a'].notnull(), 'a IS NOT NULL'),
((self.table['a'] + self.table['b']).isnull(), 'a + b IS NULL')
]
self._check_expr_cases(cases)
def test_casts(self):
a, d, g = self.table.get_columns(['a', 'd', 'g'])
cases = [
(a.cast('int16'), 'CAST(a AS smallint)'),
(a.cast('int32'), 'CAST(a AS int)'),
(a.cast('int64'), 'CAST(a AS bigint)'),
(a.cast('float'), 'CAST(a AS float)'),
(a.cast('double'), 'CAST(a AS double)'),
(a.cast('string'), 'CAST(a AS string)'),
(d.cast('int8'), 'CAST(d AS tinyint)'),
(g.cast('double'), 'CAST(g AS double)'),
(g.cast('timestamp'), 'CAST(g AS timestamp)')
]
self._check_expr_cases(cases)
def test_misc_conditionals(self):
a = self.table.a
cases = [
(a.nullif(0), 'nullif(a, 0)')
]
self._check_expr_cases(cases)
def test_decimal_casts(self):
cases = [
(ibis.literal('9.9999999').cast('decimal(38,5)'),
"CAST('9.9999999' AS decimal(38,5))"),
(self.table.f.cast('decimal(12,2)'), "CAST(f AS decimal(12,2))")
]
self._check_expr_cases(cases)
def test_negate(self):
cases = [
(-self.table['a'], '-a'),
(-self.table['f'], '-f'),
(-self.table['h'], 'NOT h')
]
self._check_expr_cases(cases)
def test_timestamp_extract_field(self):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'millisecond']
cases = [(getattr(self.table.i, field)(),
"extract(i, '{0}')".format(field))
for field in fields]
self._check_expr_cases(cases)
# integration with SQL translation
expr = self.table[self.table.i.year().name('year'),
self.table.i.month().name('month'),
self.table.i.day().name('day')]
result = to_sql(expr)
expected = \
"""SELECT extract(i, 'year') AS `year`, extract(i, 'month') AS `month`,
extract(i, 'day') AS `day`
FROM alltypes"""
assert result == expected
def test_timestamp_now(self):
cases = [
(ibis.now(), 'now()')
]
self._check_expr_cases(cases)
def test_timestamp_deltas(self):
units = ['year', 'month', 'week', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']
t = self.table.i
f = 'i'
cases = []
for unit in units:
K = 5
offset = getattr(ibis, unit)(K)
template = '{0}s_add({1}, {2})'
cases.append((t + offset, template.format(unit, f, K)))
cases.append((t - offset, template.format(unit, f, -K)))
self._check_expr_cases(cases)
def test_timestamp_literals(self):
tv1 = '2015-01-01 12:34:56'
ex1 = ("'2015-01-01 12:34:56'")
cases = [
(ibis.literal(pd.Timestamp(tv1)), ex1),
(ibis.literal(pd.Timestamp(tv1).to_pydatetime()), ex1),
(ibis.timestamp(tv1), ex1)
]
self._check_expr_cases(cases)
def test_timestamp_from_integer(self):
col = self.table.c
cases = [
(col.to_timestamp(),
'CAST(from_unixtime(c, "yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
(col.to_timestamp('ms'),
'CAST(from_unixtime(CAST(c / 1000 AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
(col.to_timestamp('us'),
'CAST(from_unixtime(CAST(c / 1000000 AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)'),
]
self._check_expr_cases(cases)
def test_correlated_predicate_subquery(self):
t0 = self.table
t1 = t0.view()
expr = t0.g == t1.g
ctx = QueryContext()
ctx.make_alias(t0)
# Grab alias from parent context
subctx = ctx.subcontext()
subctx.make_alias(t1)
subctx.make_alias(t0)
result = self._translate(expr, context=subctx)
expected = "t0.g = t1.g"
assert result == expected
class TestUnaryBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_numeric_monadic_builtins(self):
# No argument functions
functions = ['abs', 'ceil', 'floor', 'exp', 'sqrt', 'sign',
('log', 'ln'),
('approx_median', 'appx_median'),
('approx_nunique', 'ndv'),
'ln', 'log2', 'log10', 'zeroifnull']
cases = []
for what in functions:
if isinstance(what, tuple):
ibis_name, sql_name = what
else:
ibis_name = sql_name = what
for cname in ['double_col', 'int_col']:
expr = getattr(self.table[cname], ibis_name)()
cases.append((expr, '{0}({1})'.format(sql_name, cname)))
self._check_expr_cases(cases)
def test_log_other_bases(self):
cases = [
(self.table.double_col.log(5), 'log(double_col, 5)')
]
self._check_expr_cases(cases)
def test_round(self):
cases = [
(self.table.double_col.round(), 'round(double_col)'),
(self.table.double_col.round(0), 'round(double_col, 0)'),
(self.table.double_col.round(2, ), 'round(double_col, 2)'),
(self.table.double_col.round(self.table.tinyint_col),
'round(double_col, tinyint_col)')
]
self._check_expr_cases(cases)
def test_hash(self):
expr = self.table.int_col.hash()
assert isinstance(expr, ir.Int64Array)
assert isinstance(self.table.int_col.sum().hash(),
ir.Int64Scalar)
cases = [
(self.table.int_col.hash(), 'fnv_hash(int_col)')
]
self._check_expr_cases(cases)
def test_reduction_where(self):
cond = self.table.bigint_col < 70
c = self.table.double_col
tmp = '{0}(CASE WHEN bigint_col < 70 THEN double_col ELSE NULL END)'
cases = [
(c.sum(where=cond), tmp.format('sum')),
(c.count(where=cond), tmp.format('count')),
(c.mean(where=cond), tmp.format('avg')),
(c.max(where=cond), tmp.format('max')),
(c.min(where=cond), tmp.format('min')),
]
self._check_expr_cases(cases)
def test_reduction_invalid_where(self):
condbad_literal = ibis.literal('T')
c = self.table.double_col
for reduction in [c.sum, c.count, c.mean, c.max, c.min]:
with self.assertRaises(TypeError):
reduction(where=condbad_literal)
class TestCaseExprs(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_isnull_1_0(self):
expr = self.table.g.isnull().ifelse(1, 0)
result = self._translate(expr)
expected = 'CASE WHEN g IS NULL THEN 1 ELSE 0 END'
assert result == expected
# inside some other function
result = self._translate(expr.sum())
expected = 'sum(CASE WHEN g IS NULL THEN 1 ELSE 0 END)'
assert result == expected
def test_simple_case(self):
expr = (self.table.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default')
.end())
result = self._translate(expr)
expected = """CASE g
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END"""
assert result == expected
def test_search_case(self):
expr = (ibis.case()
.when(self.table.f > 0, self.table.d * 2)
.when(self.table.c < 0, self.table.a * 2)
.end())
result = self._translate(expr)
expected = """CASE
WHEN f > 0 THEN d * 2
WHEN c < 0 THEN a * 2
ELSE NULL
END"""
assert result == expected
def test_where_use_if(self):
expr = ibis.where(self.table.f > 0, self.table.e, self.table.a)
assert isinstance(expr, ir.FloatValue)
result = self._translate(expr)
expected = "if(f > 0, e, a)"
assert result == expected
def test_nullif_ifnull(self):
table = self.con.table('tpch_lineitem')
f = table.l_quantity
cases = [
(f.nullif(f == 0),
'nullif(l_quantity, l_quantity = 0)'),
(f.fillna(0),
'isnull(l_quantity, CAST(0 AS decimal(12,2)))'),
]
self._check_expr_cases(cases)
def test_decimal_fillna_cast_arg(self):
table = self.con.table('tpch_lineitem')
f = table.l_extendedprice
cases = [
(f.fillna(0),
'isnull(l_extendedprice, CAST(0 AS decimal(12,2)))'),
(f.fillna(0.0), 'isnull(l_extendedprice, 0.0)'),
]
self._check_expr_cases(cases)
class TestBucketHistogram(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_bucket_to_case(self):
buckets = [0, 10, 25, 50]
expr1 = self.table.f.bucket(buckets)
expected1 = """\
CASE
WHEN (f >= 0) AND (f < 10) THEN 0
WHEN (f >= 10) AND (f < 25) THEN 1
WHEN (f >= 25) AND (f <= 50) THEN 2
ELSE NULL
END"""
expr2 = self.table.f.bucket(buckets, close_extreme=False)
expected2 = """\
CASE
WHEN (f >= 0) AND (f < 10) THEN 0
WHEN (f >= 10) AND (f < 25) THEN 1
WHEN (f >= 25) AND (f < 50) THEN 2
ELSE NULL
END"""
expr3 = self.table.f.bucket(buckets, closed='right')
expected3 = """\
CASE
WHEN (f >= 0) AND (f <= 10) THEN 0
WHEN (f > 10) AND (f <= 25) THEN 1
WHEN (f > 25) AND (f <= 50) THEN 2
ELSE NULL
END"""
expr4 = self.table.f.bucket(buckets, closed='right',
close_extreme=False)
expected4 = """\
CASE
WHEN (f > 0) AND (f <= 10) THEN 0
WHEN (f > 10) AND (f <= 25) THEN 1
WHEN (f > 25) AND (f <= 50) THEN 2
ELSE NULL
END"""
expr5 = self.table.f.bucket(buckets, include_under=True)
expected5 = """\
CASE
WHEN f < 0 THEN 0
WHEN (f >= 0) AND (f < 10) THEN 1
WHEN (f >= 10) AND (f < 25) THEN 2
WHEN (f >= 25) AND (f <= 50) THEN 3
ELSE NULL
END"""
expr6 = self.table.f.bucket(buckets,
include_under=True,
include_over=True)
expected6 = """\
CASE
WHEN f < 0 THEN 0
WHEN (f >= 0) AND (f < 10) THEN 1
WHEN (f >= 10) AND (f < 25) THEN 2
WHEN (f >= 25) AND (f <= 50) THEN 3
WHEN f > 50 THEN 4
ELSE NULL
END"""
expr7 = self.table.f.bucket(buckets,
close_extreme=False,
include_under=True,
include_over=True)
expected7 = """\
CASE
WHEN f < 0 THEN 0
WHEN (f >= 0) AND (f < 10) THEN 1
WHEN (f >= 10) AND (f < 25) THEN 2
WHEN (f >= 25) AND (f < 50) THEN 3
WHEN f >= 50 THEN 4
ELSE NULL
END"""
expr8 = self.table.f.bucket(buckets, closed='right',
close_extreme=False,
include_under=True)
expected8 = """\
CASE
WHEN f <= 0 THEN 0
WHEN (f > 0) AND (f <= 10) THEN 1
WHEN (f > 10) AND (f <= 25) THEN 2
WHEN (f > 25) AND (f <= 50) THEN 3
ELSE NULL
END"""
expr9 = self.table.f.bucket([10], closed='right',
include_over=True,
include_under=True)
expected9 = """\
CASE
WHEN f <= 10 THEN 0
WHEN f > 10 THEN 1
ELSE NULL
END"""
expr10 = self.table.f.bucket([10], include_over=True,
include_under=True)
expected10 = """\
CASE
WHEN f < 10 THEN 0
WHEN f >= 10 THEN 1
ELSE NULL
END"""
cases = [
(expr1, expected1),
(expr2, expected2),
(expr3, expected3),
(expr4, expected4),
(expr5, expected5),
(expr6, expected6),
(expr7, expected7),
(expr8, expected8),
(expr9, expected9),
(expr10, expected10),
]
self._check_expr_cases(cases)
def test_cast_category_to_int_noop(self):
# Because the bucket result is an integer, no explicit cast is
# necessary
expr = (self.table.f.bucket([10], include_over=True,
include_under=True)
.cast('int32'))
expected = """\
CASE
WHEN f < 10 THEN 0
WHEN f >= 10 THEN 1
ELSE NULL
END"""
expr2 = (self.table.f.bucket([10], include_over=True,
include_under=True)
.cast('double'))
expected2 = """\
CAST(CASE
WHEN f < 10 THEN 0
WHEN f >= 10 THEN 1
ELSE NULL
END AS double)"""
self._check_expr_cases([(expr, expected),
(expr2, expected2)])
def test_bucket_assign_labels(self):
buckets = [0, 10, 25, 50]
bucket = self.table.f.bucket(buckets, include_under=True)
size = self.table.group_by(bucket.name('tier')).size()
labelled = size.tier.label(['Under 0', '0 to 10',
'10 to 25', '25 to 50'],
nulls='error').name('tier2')
expr = size[labelled, size['count']]
expected = """\
SELECT
CASE tier
WHEN 0 THEN 'Under 0'
WHEN 1 THEN '0 to 10'
WHEN 2 THEN '10 to 25'
WHEN 3 THEN '25 to 50'
ELSE 'error'
END AS `tier2`, count
FROM (
SELECT
CASE
WHEN f < 0 THEN 0
WHEN (f >= 0) AND (f < 10) THEN 1
WHEN (f >= 10) AND (f < 25) THEN 2
WHEN (f >= 25) AND (f <= 50) THEN 3
ELSE NULL
END AS `tier`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
result = to_sql(expr)
assert result == expected
self.assertRaises(ValueError, size.tier.label, ['a', 'b', 'c'])
self.assertRaises(ValueError, size.tier.label,
['a', 'b', 'c', 'd', 'e'])
class TestInNotIn(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_field_in_literals(self):
cases = [
(self.table.g.isin(["foo", "bar", "baz"]),
"g IN ('foo', 'bar', 'baz')"),
(self.table.g.notin(["foo", "bar", "baz"]),
"g NOT IN ('foo', 'bar', 'baz')")
]
self._check_expr_cases(cases)
def test_literal_in_list(self):
cases = [
(ibis.literal(2).isin([self.table.a, self.table.b, self.table.c]),
'2 IN (a, b, c)'),
(ibis.literal(2).notin([self.table.a, self.table.b, self.table.c]),
'2 NOT IN (a, b, c)')
]
self._check_expr_cases(cases)
def test_isin_notin_in_select(self):
filtered = self.table[self.table.g.isin(["foo", "bar"])]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE g IN ('foo', 'bar')"""
assert result == expected
filtered = self.table[self.table.g.notin(["foo", "bar"])]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE g NOT IN ('foo', 'bar')"""
assert result == expected
class TestCoalesceGreaterLeast(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_coalesce(self):
t = self.table
cases = [
(ibis.coalesce(t.string_col, 'foo'),
"coalesce(string_col, 'foo')"),
(ibis.coalesce(t.int_col, t.bigint_col),
'coalesce(int_col, bigint_col)'),
]
self._check_expr_cases(cases)
def test_greatest(self):
t = self.table
cases = [
(ibis.greatest(t.string_col, 'foo'),
"greatest(string_col, 'foo')"),
(ibis.greatest(t.int_col, t.bigint_col),
'greatest(int_col, bigint_col)'),
]
self._check_expr_cases(cases)
def test_least(self):
t = self.table
cases = [
(ibis.least(t.string_col, 'foo'),
"least(string_col, 'foo')"),
(ibis.least(t.int_col, t.bigint_col),
'least(int_col, bigint_col)'),
]
self._check_expr_cases(cases)
class TestAnalyticFunctions(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_analytic_exprs(self):
t = self.table
w = ibis.window(order_by=t.float_col)
cases = [
(ibis.row_number().over(w),
'row_number() OVER (ORDER BY float_col) - 1'),
(t.string_col.lag(), 'lag(string_col)'),
(t.string_col.lag(2), 'lag(string_col, 2)'),
(t.string_col.lag(default=0), 'lag(string_col, 1, 0)'),
(t.string_col.lead(), 'lead(string_col)'),
(t.string_col.lead(2), 'lead(string_col, 2)'),
(t.string_col.lead(default=0), 'lead(string_col, 1, 0)'),
(t.double_col.first(), 'first_value(double_col)'),
(t.double_col.last(), 'last_value(double_col)'),
# (t.double_col.nth(4), 'first_value(lag(double_col, 4 - 1))')
]
self._check_expr_cases(cases)
class TestStringBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_unary_ops(self):
s = self.table.string_col
cases = [
(s.lower(), 'lower(string_col)'),
(s.upper(), 'upper(string_col)'),
(s.reverse(), 'reverse(string_col)'),
(s.strip(), 'trim(string_col)'),
(s.lstrip(), 'ltrim(string_col)'),
(s.rstrip(), 'rtrim(string_col)'),
(s.capitalize(), 'initcap(string_col)'),
(s.length(), 'length(string_col)'),
(s.ascii_str(), 'ascii(string_col)')
]
self._check_expr_cases(cases)
def test_substr(self):
# Database numbers starting from 1
cases = [
(self.table.string_col.substr(2), 'substr(string_col, 2 + 1)'),
(self.table.string_col.substr(0, 3),
'substr(string_col, 0 + 1, 3)')
]
self._check_expr_cases(cases)
def test_strright(self):
cases = [
(self.table.string_col.right(4), 'strright(string_col, 4)')
]
self._check_expr_cases(cases)
def test_like(self):
cases = [
(self.table.string_col.like('foo%'), "string_col LIKE 'foo%'")
]
self._check_expr_cases(cases)
def test_rlike(self):
        ex = r"string_col RLIKE '[\d]+'"
        cases = [
            (self.table.string_col.rlike(r'[\d]+'), ex),
            (self.table.string_col.re_search(r'[\d]+'), ex),
]
self._check_expr_cases(cases)
def test_re_extract(self):
        sql = r"regexp_extract(string_col, '[\d]+', 0)"
        cases = [
            (self.table.string_col.re_extract(r'[\d]+', 0), sql)
]
self._check_expr_cases(cases)
def test_re_replace(self):
        sql = r"regexp_replace(string_col, '[\d]+', 'aaa')"
        cases = [
            (self.table.string_col.re_replace(r'[\d]+', 'aaa'), sql)
]
self._check_expr_cases(cases)
def test_parse_url(self):
sql = "parse_url(string_col, 'HOST')"
cases = [
(self.table.string_col.parse_url('HOST'), sql)
]
self._check_expr_cases(cases)
def test_repeat(self):
cases = [
(self.table.string_col.repeat(2), 'repeat(string_col, 2)')
]
self._check_expr_cases(cases)
def test_translate(self):
cases = [
(self.table.string_col.translate('a', 'b'),
"translate(string_col, 'a', 'b')")
]
self._check_expr_cases(cases)
def test_find(self):
s = self.table.string_col
i1 = self.table.tinyint_col
cases = [
(s.find('a'), "locate('a', string_col) - 1"),
(s.find('a', 2), "locate('a', string_col, 3) - 1"),
(s.find('a', start=i1),
"locate('a', string_col, tinyint_col + 1) - 1")
]
self._check_expr_cases(cases)
def test_lpad(self):
cases = [
(self.table.string_col.lpad(1, 'a'), "lpad(string_col, 1, 'a')"),
(self.table.string_col.lpad(25), "lpad(string_col, 25, ' ')")
]
self._check_expr_cases(cases)
def test_rpad(self):
cases = [
(self.table.string_col.rpad(1, 'a'), "rpad(string_col, 1, 'a')"),
(self.table.string_col.rpad(25), "rpad(string_col, 25, ' ')")
]
self._check_expr_cases(cases)
def test_find_in_set(self):
cases = [
(self.table.string_col.find_in_set(['a']),
"find_in_set(string_col, 'a') - 1"),
(self.table.string_col.find_in_set(['a', 'b']),
"find_in_set(string_col, 'a,b') - 1")
]
self._check_expr_cases(cases)
def test_string_join(self):
cases = [
(ibis.literal(',').join(['a', 'b']), "concat_ws(',', 'a', 'b')")
]
self._check_expr_cases(cases)
| apache-2.0 |
hainm/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
wavelets/lifelines | lifelines/generate_datasets.py | 3 | 9052 | # -*- coding: utf-8 -*-
# lib to create fake survival datasets
import numpy as np
from numpy import random
import pandas as pd
from scipy import stats as stats
from scipy.optimize import newton
from scipy.integrate import cumtrapz
def exponential_survival_data(n, cr=0.05, scale=1.):
t = stats.expon.rvs(scale=scale, size=n)
if cr == 0.0:
return t, np.ones(n, dtype=bool)
def pF(h):
v = 1.0 * h / scale
return v / (np.exp(v) - 1) - cr
# find the threshold:
h = newton(pF, 1., maxiter=500)
# generate truncated data
R = (1 - np.exp(-h / scale)) * stats.uniform.rvs(size=n)
entrance = -np.log(1 - R) * scale
C = (t + entrance) < h # should occur 1-cr of the time.
T = np.minimum(h - entrance, t)
return T, C
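# Example usage (a hedged sketch; exact numbers are random draws):
#
#   T, C = exponential_survival_data(1000, cr=0.05, scale=2.)
#   T.shape      # (1000,) observed durations, truncated at the solved threshold h
#   C.mean()     # roughly 1 - cr, i.e. about 0.95 of the lifetimes are fully observed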
# Models with covariates
class coeff_func(object):
"""This is a decorator class used later to construct nice names"""
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
def __repr__():
s = self.f.__doc__.replace("alpha", "%.2f" % kwargs["alpha"]).replace("beta", "%.2f" % kwargs["beta"])
return s
self.__doc__ = __repr__()
self.__repr__ = __repr__
self.__str__ = __repr__
return self.f(*args, **kwargs)
@coeff_func
def exp_comp_(t, alpha=1, beta=1):
"""beta*(1 - np.exp(-alpha*(t-beta)))"""
return beta * (1 - np.exp(-alpha * np.maximum(0, t - 10 * beta)))
@coeff_func
def log_(t, alpha=1, beta=1):
"""beta*np.log(alpha*(t-beta)+1)"""
return beta * np.log(alpha * np.maximum(0, t - 10 * beta) + 1)
@coeff_func
def inverseSq_(t, alpha=1, beta=1):
"""beta/(t+alpha+1)**(0.5)"""
return beta / (t + alpha + 1) ** (0.5)
@coeff_func
def periodic_(t, alpha=1, beta=1):
"""abs(0.5*beta*sin(0.1*alpha*t + alpha*beta))"""
return 0.5 * beta * np.sin(0.1 * alpha * t)
@coeff_func
def constant_(t, alpha=1, beta=1):
"""beta"""
return beta
FUNCS = [exp_comp_, log_, inverseSq_, constant_, periodic_]
def right_censor_lifetimes(lifetimes, max_, min_=0):
"""
Right censor the deaths, uniformly
lifetimes: (n,) array of positive random variables
max_: the max time a censorship can occur
min_: the min time a censorship can occur
Returns
The actual observations including uniform right censoring, and
D_i (observed death or did not)
I think this is deprecated
"""
n = lifetimes.shape[0]
u = min_ + (max_ - min_) * random.rand(n)
observations = np.minimum(u, lifetimes)
return observations, lifetimes == observations
def generate_covariates(n, d, n_binary=0, p=0.5):
"""
n: the number of instances, integer
    d: the dimension of the covariates, integer
    n_binary: an integer between 0 and d, the number of binary covariates
    p: for the binary covariates, the probability of a 1
    returns (n, d+1)
    """
    assert (n_binary >= 0 and n_binary <= d), "n_binary must be between 0 and d"
covariates = np.zeros((n, d + 1))
covariates[:, :d - n_binary] = random.exponential(1, size=(n, d - n_binary))
covariates[:, d - n_binary:-1] = random.binomial(1, p, size=(n, n_binary))
covariates[:, -1] = np.ones(n)
return covariates
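# Example (illustrative): 100 subjects with 5 covariates, 2 of them binary;
# the returned matrix has an extra trailing column of ones (an intercept term):
#
#   X = generate_covariates(100, 5, n_binary=2)
#   X.shape      # (100, 6); columns 0-2 exponential, 3-4 are 0/1, 5 is all ones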
def constant_coefficients(d, timelines, constant=False, independent=0):
"""
Proportional hazards model.
d: the dimension of the dataset
timelines: the observational times
constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is independent of survival), or
      a list of covariates to make independent.
    returns a DataFrame of shape (t, d) of coefficients
"""
return time_varying_coefficients(d, timelines, constant=True, independent=independent, randgen=random.normal)
def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen=random.exponential):
"""
Time vary coefficients
d: the dimension of the dataset
timelines: the observational times
constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is independent of survival), or
      a list of covariates to make independent.
    randgen: how scalar coefficients (betas) are sampled.
    returns a DataFrame of shape (t, d) of coefficients
"""
t = timelines.shape[0]
try:
a = np.arange(d)
random.shuffle(a)
independent = a[:independent]
except IndexError:
pass
n_funcs = len(FUNCS)
coefficients = np.zeros((t, d))
data_generators = []
for i in range(d):
f = FUNCS[random.randint(0, n_funcs)] if not constant else constant_
if i in independent:
beta = 0
else:
beta = randgen((1 - constant) * 0.5 / d)
coefficients[:, i] = f(timelines, alpha=randgen(2000.0 / t), beta=beta)
data_generators.append(f.__doc__)
df_coefficients = pd.DataFrame(coefficients, columns=data_generators, index=timelines)
return df_coefficients
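# Example (a hedged sketch): draw 4 time-varying coefficient curves over an
# observation window of 0-70; the DataFrame is indexed by the timelines and the
# column names record which generating function (and parameters) produced each curve:
#
#   timelines = np.linspace(0, 70, 71)
#   coeffs = time_varying_coefficients(4, timelines)
#   coeffs.shape     # (71, 4)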
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
    timelines: the observational times
    constant: make the coefficients constant (not time dependent)
    independent: the number of coefficients to set to 0 (covariate is independent of survival)
    n_binary: the number of binary covariates
    model: from ["aalen", "cox"]
    Returns:
      hazard rates: (t,n) dataframe,
      coefficients: (t,d+1) dataframe of coefficients,
      covariates: (n,d) dataframe
"""
covariates = generate_covariates(n, d, n_binary=n_binary)
if model == "aalen":
coefficients = time_varying_coefficients(d + 1, timelines, independent=independent, constant=constant)
hazard_rates = np.dot(covariates, coefficients.T)
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
elif model == "cox":
covariates = covariates[:, :-1]
        coefficients = constant_coefficients(d, timelines, independent=independent)
baseline = time_varying_coefficients(1, timelines)
hazard_rates = np.exp(np.dot(covariates, coefficients.T)) * baseline[baseline.columns[0]].values
coefficients["baseline: " + baseline.columns[0]] = baseline.values
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
else:
raise Exception
def generate_random_lifetimes(hazard_rates, timelines, size=1, censor=None):
"""
Based on the hazard rates, compute random variables from the survival function
    hazard_rates: (t,n) DataFrame of hazard rates
    timelines: (t,) the observation times
    size: the number to return, per hazard rate
censor: If True, adds uniform censoring between timelines.max() and 0
If a postive number, censors all events above that value.
If (n,) np.array >=0 , censor elementwise.
Returns:
survival_times: (size,n) array of random variables.
(optional) censorship: if censor is true, returns (size,n) array with bool True
if the death was observed (not right-censored)
"""
n = hazard_rates.shape[1]
survival_times = np.empty((n, size))
cumulative_hazards = cumulative_integral(hazard_rates.values, timelines).T
for i in range(size):
u = random.rand(n, 1)
e = -np.log(u)
v = (e - cumulative_hazards) < 0
cross = v.argmax(1)
survival_times[:, i] = timelines[cross]
survival_times[cross == 0, i] = np.inf
if censor is not None:
if isinstance(censor, bool):
T = timelines.max()
rv = T * random.uniform(size=survival_times.shape)
else:
rv = censor
observed = np.less_equal(survival_times, rv)
survival_times = np.minimum(rv, survival_times)
return survival_times.T, observed.T
else:
return survival_times
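# Example pipeline (a hedged sketch): simulate an Aalen additive-hazards dataset
# for 200 subjects with 3 covariates and draw one (possibly censored) lifetime each:
#
#   timelines = np.linspace(0, 70, 71)
#   hz, coeff, covs = generate_hazard_rates(200, 3, timelines)
#   T, E = generate_random_lifetimes(hz, timelines, censor=True)
#   T.shape, E.shape     # both (1, 200): event times and observed-event flags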
def generate_observational_matrix(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
    hz, coeff, covariates = generate_hazard_rates(n, d, timelines, constant=constant, independent=independent, n_binary=n_binary, model=model)
R = generate_random_lifetimes(hz, timelines)
covariates["event_at"] = R.T
    return covariates.sort_values("event_at"), pd.DataFrame(cumulative_integral(coeff.values, timelines), columns=coeff.columns, index=timelines)
def cumulative_integral(fx, x):
"""
Return the cumulative integral of arrays, initial value is 0.
Parameters:
fx: (n,d) numpy array, what you want to integral of
x: (n,) numpy array, location to integrate over.
"""
return cumtrapz(fx.T, x, initial=0).T
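# Quick check (illustrative): integrating f(x) = 2x over x = [0, 1, 2] gives
# cumulative values [0, 1, 4]; the trapezoidal rule is exact for linear integrands:
#
#   cumulative_integral(np.array([[0.], [2.], [4.]]), np.array([0., 1., 2.]))
#   # -> array([[ 0.], [ 1.], [ 4.]])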
def construct_survival_curves(hazard_rates, timelines):
"""
Given hazard rates, reconstruct the survival curves
    hazard_rates: (t,n) DataFrame of hazard rates
    timelines: (t,) the observational times
    Returns:
      survival curves, (t,n) DataFrame indexed by timelines
"""
cumulative_hazards = cumulative_integral(hazard_rates.values, timelines)
return pd.DataFrame(np.exp(-cumulative_hazards), index=timelines)
| mit |
diogo149/treeano | examples/recurrent_convolution/mnist_cnn.py | 3 | 3688 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import recurrent_convolution as rcl
import canopy
fX = theano.config.floatX
# ############################### prepare data ###############################
mnist = sklearn.datasets.fetch_mldata('MNIST original')
# theano has a constant float type that it uses (float32 for GPU)
# also rescaling to [0, 1] instead of [0, 255]
X = mnist['data'].reshape(-1, 1, 28, 28).astype(fX) / 255.0
y = mnist['target'].astype("int32")
X_train, X_valid, y_train, y_valid = sklearn.cross_validation.train_test_split(
X, y, random_state=42)
in_train = {"x": X_train, "y": y_train}
in_valid = {"x": X_valid, "y": y_valid}
# ############################## prepare model ##############################
# - the batch size can be provided as `None` to make the network
# work for multiple different batch sizes
model = tn.HyperparameterNode(
"model",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(None, 1, 28, 28)),
rcl.DefaultRecurrentConv2DNode("conv1"),
tn.ReLUNode("relu1"),
tn.MaxPool2DNode("mp1"),
rcl.DefaultRecurrentConv2DNode("conv2"),
tn.ReLUNode("relu2"),
tn.MaxPool2DNode("mp2"),
tn.DenseNode("fc1"),
tn.ReLUNode("relu3"),
tn.DropoutNode("do1"),
tn.DenseNode("fc2", num_units=10),
tn.SoftmaxNode("pred"),
]),
num_filters=32,
filter_size=(3, 3),
pool_size=(2, 2),
num_units=256,
dropout_probability=0.5,
inits=[treeano.inits.XavierNormalInit()],
)
with_updates = tn.HyperparameterNode(
"with_updates",
tn.AdamNode(
"adam",
{"subtree": model,
"cost": tn.TotalCostNode("cost", {
"pred": tn.ReferenceNode("pred_ref", reference="model"),
"target": tn.InputNode("y", shape=(None,), dtype="int32")},
)}),
cost_function=treeano.utils.categorical_crossentropy_i32,
)
network = with_updates.network()
network.build() # build eagerly to share weights
BATCH_SIZE = 500
valid_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="valid_time"),
canopy.handlers.override_hyperparameters(dropout_probability=0),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"cost": "cost", "pred": "pred"})
def validate(in_dict, results_dict):
valid_out = valid_fn(in_valid)
probabilities = valid_out["pred"]
predicted_classes = np.argmax(probabilities, axis=1)
results_dict["valid_cost"] = valid_out["cost"]
results_dict["valid_time"] = valid_out["valid_time"]
results_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
y_valid, predicted_classes)
train_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="total_time"),
canopy.handlers.call_after_every(1, validate),
canopy.handlers.time_call(key="train_time"),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"train_cost": "cost"},
include_updates=True)
# ################################# training #################################
print("Starting training...")
canopy.evaluate_until(fn=train_fn,
gen=itertools.repeat(in_train),
max_iters=25)
| apache-2.0 |
rexthompson/axwx | axwx/wu_observation_scraping.py | 1 | 4559 |
"""
Weather Underground PWS Observation Scraping Module
Code to scrape observation data from wunderground's PWS network
"""
import csv
import os
import time
import pandas as pd
import pickle
import requests
def scrape_data_one_day(station_id, year, month, day):
"""
Retrieve PWS data for a single station and a single day
:param station_id: string
PWS station ID
:param year: int
year
:param month: int
month
:param day: int
day
:return: pandas DataFrame with data for requested day
Sample URL:
https://www.wunderground.com/weatherstation/WXDailyHistory.asp?
ID=KWAEDMON15&day=18&month=4&year=2017&graphspan=day&format=1
"""
url = "https://www.wunderground.com/" \
"weatherstation/WXDailyHistory.asp?ID=" \
+ station_id + "&day=" \
+ str(day) + "&month=" \
+ str(month) + "&year=" \
+ str(year) \
+ "&graphspan=day&format=1"
content = requests.get(url).text
content = content.replace("\n", "")
content = content.replace("<br>", "\n")
content = content.replace(",\n", "\n")
data_csv_lines = csv.reader(content.split('\n'), delimiter=',')
data_list = list(data_csv_lines)
data_df = pd.DataFrame.from_records(data_list[1:-1], columns=data_list[0])
return data_df
def scrape_data_multiple_day(station_id, start_date, end_date,
delay=3, combined_df=None):
"""
Retrieve PWS data for a single station over a given date range
:param station_id: string
PWS station ID
:param start_date: int (yyyymmdd)
start date for data retrieval
:param end_date: int (yyyymmdd)
end date for data retrieval
:param delay: int
delay between requests to WU server (seconds)
:param combined_df: pandas.DataFrame
DataFrame to which to append new observations
:return: pandas DataFrame with combined data for period requested
"""
    if combined_df is None:
        combined_df = pd.DataFrame()
# parse out date components
start_date_str = str(start_date)
start_date_yyyy = int(start_date_str[0:4])
start_date_mm = int(start_date_str[4:6])
start_date_dd = int(start_date_str[6:8])
end_date_str = str(end_date)
end_date_yyyy = int(end_date_str[0:4])
end_date_mm = int(end_date_str[4:6])
end_date_dd = int(end_date_str[6:8])
# create date range
start_date_pd = pd.datetime(start_date_yyyy, start_date_mm, start_date_dd)
end_date_pd = pd.datetime(end_date_yyyy, end_date_mm, end_date_dd)
date_list = pd.date_range(start_date_pd, end_date_pd)
for date in date_list:
temp_yyyy = date.year
temp_mm = date.month
temp_dd = date.day
print('retrieving data for ' + station_id + " on " +
str(temp_yyyy) + "-" + str(temp_mm) + "-" + str(temp_dd))
day_df = scrape_data_one_day(station_id=station_id, year=temp_yyyy,
month=temp_mm, day=temp_dd)
combined_df = combined_df.append(day_df, ignore_index=True)
time.sleep(delay)
return combined_df
# examples to run
# single_day = scrape_data_one_day(station_id="KWAEDMON15",
# year=2016, month=9, day=10)
# multi_day = scrape_data_multiple_day("KWAEDMON15", 20170217, 20170219)
def scrape_data_multiple_stations_and_days(station_ids, start_date,
end_date, data_dir, delay=1):
"""
Retrieve PWS data for multiple stations over a given date range
:param station_ids: list
WU PWS station IDs
:param start_date: int (yyyymmdd)
start date for data retrieval
:param end_date: int (yyyymmdd)
end date for data retrieval
:param data_dir: str
data directory to which to save pickle files for each station
:param delay: int
delay between requests to WU server (seconds)
:return: None (files saved to given directory)
"""
orig_dir = os.getcwd()
os.chdir(data_dir)
for station in station_ids:
df = scrape_data_multiple_day(station, start_date, end_date, delay)
filename = station + ".p"
pickle.dump(df, open(filename, "wb"))
os.chdir(orig_dir)
# station_ids = ['KWASEATT134', 'KWASEATT166']
# data_dir = "/Users/Thompson/Desktop/DATA 515/" \
# "Final Project/data/local/wu_station_data"
# scrape_data_multiple_stations_and_days(station_ids, 20160501, 20160503,
# data_dir)
| mit |
SCECcode/BBP | bbp/utils/batch/gmpe_boxplot_gen.py | 1 | 43956 | #!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Python version of Ronnie Kamai Matlab scripts to generate box
plots of the GMPE comparisons. Added pre-computed NGA-West 2
data and pass ranges into this script. No longer uses Matlab and no
longer needs to mine gmpe data files from the realizations.
NGA-West 2 data and ranges calculated for a vs30=500m/s
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import glob
import optparse
import numpy as np
import matplotlib as mpl
if mpl.get_backend() != 'agg':
mpl.use('Agg') # Disables use of Tk/X11
import matplotlib.pyplot as plt
import pylab
# Import plot config file
import plot_config
# --------------------------------------------------------------------------
# Some constants
# --------------------------------------------------------------------------
CODEBASES = ["gp", "ucsb", "sdsu", "exsim", "csm",
"irikura_recipe_m1", "irikura_recipe_m2", "song"]
ALL_PERIODS = [0.010, 0.011, 0.012, 0.013, 0.015, 0.017, 0.020, 0.022, 0.025,
0.029, 0.032, 0.035, 0.040, 0.045, 0.050, 0.055, 0.060, 0.065,
0.075, 0.085, 0.100, 0.110, 0.120, 0.130, 0.150, 0.170, 0.200,
0.220, 0.240, 0.260, 0.280, 0.300, 0.350, 0.400, 0.450, 0.500,
0.550, 0.600, 0.650, 0.750, 0.850, 1.000, 1.100, 1.200, 1.300,
1.500, 1.700, 2.000, 2.200, 2.400, 2.600, 2.800, 3.000, 3.500,
4.000, 4.400, 5.000, 5.500, 6.000, 6.500, 7.500, 8.500, 10.000]
PERIODS = [0.010, 0.020, 0.029, 0.04, 0.05, 0.06, 0.075, 0.1,
0.15, 0.2, 0.26, 0.3, 0.4, 0.5, 0.6, 0.75, 1, 1.5,
2, 3, 4, 5, 6, 7.5, 10]
# NGA-WEST2
NGA2 = {}
NGA2["Tacpt"] = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25,
0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3]
NGA2["Tave"] = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25,
0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10]
NGA2["Tnga"] = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25,
0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10]
NGA2["M55rvLo20"] = [0.056359724, 0.057044943, 0.063052985, 0.078978612,
0.101530703, 0.120119872, 0.13296784, 0.128546962,
0.117750264, 0.10564989, 0.081342995, 0.064121255,
0.036732153, 0.023337408, 0.010869471, 0.006085181,
0.00275199, 0.001625306, 0.001038724, 0.000349007,
0.000157345]
NGA2["M55rvHi20"] = [0.125504337, 0.127306539, 0.137189798, 0.164383634,
0.211322856, 0.250013776, 0.276755137, 0.267553659,
0.245743713, 0.23274815, 0.175957758, 0.136219582,
0.082829275, 0.054039265, 0.0250696, 0.014611802,
0.006641659, 0.003825281, 0.0025881, 0.000950908,
0.000509454]
NGA2["M55rvLo50"] = [0.015693551, 0.015856199, 0.017280531, 0.021608587,
0.02746741, 0.031602528, 0.036296461, 0.037982733,
0.036241044, 0.033239867, 0.026197056, 0.020834545,
0.012570198, 0.008180614, 0.003934713, 0.002213657,
0.001003148, 0.000597907, 0.000348378, 0.000117337,
0.0000529837]
NGA2["M55rvHi50"] = [0.039309261, 0.038862562, 0.040485273, 0.048195188,
0.062499718, 0.076073779, 0.082849381, 0.081310934,
0.078643278, 0.076182335, 0.057678974, 0.045034743,
0.028390863, 0.018236355, 0.008738747, 0.005040957,
0.002288209, 0.001321017, 0.000876422, 0.000354791,
0.000197533]
NGA2["M62ssLo20"] = [0.087999109, 0.089180062, 0.096010328, 0.115150386,
0.146038501, 0.172970911, 0.205608599, 0.211021311,
0.200417144, 0.183593925, 0.151808551, 0.126387094,
0.082732235, 0.059038042, 0.033635823, 0.022089442,
0.011925054, 0.007353834, 0.004339077, 0.001631526,
0.000794116]
NGA2["M62ssHi20"] = [0.183158615, 0.185616615, 0.199832918, 0.239670442,
0.303960005, 0.360016289, 0.427947359, 0.439213212,
0.417142026, 0.3821267, 0.315969609, 0.263058177,
0.172196308, 0.12287995, 0.070008559, 0.04597628,
0.024820438, 0.015699975, 0.010831109, 0.005254633,
0.002884952]
NGA2["M62ssLo50"] = [0.030580163, 0.030848717, 0.032972274, 0.03945978,
0.049770361, 0.059007081, 0.070906448, 0.073393616,
0.070852716, 0.066133238, 0.055633849, 0.046819229,
0.031434513, 0.022366469, 0.013023804, 0.008575046,
0.004623717, 0.002720029, 0.001608385, 0.000605917,
0.000295224]
NGA2["M62ssHi50"] = [0.063648603, 0.064207563, 0.068627468, 0.082130362,
0.103590486, 0.122815508, 0.147582482, 0.156381333,
0.150427109, 0.140306935, 0.116595216, 0.09744809,
0.06557241, 0.046863492, 0.027107343, 0.017847835,
0.009623661, 0.006200904, 0.004292134, 0.002207376,
0.001249165]
NGA2["M66ssLo20"] = [0.107744383, 0.109165193, 0.117586943, 0.140201037,
0.176854839, 0.20820012, 0.247372043, 0.25450452,
0.243911902, 0.225532754, 0.191719076, 0.162971956,
0.110251792, 0.081327671, 0.049388482, 0.034090455,
0.020477546, 0.013130027, 0.0082192, 0.003367838,
0.001731036]
NGA2["M66ssHi20"] = [0.224255816, 0.227213046, 0.244741816, 0.29181009,
0.368100177, 0.433341273, 0.514872497, 0.529717813,
0.507670667, 0.469416878, 0.399038138, 0.339204774,
0.22947466, 0.169272891, 0.102795655, 0.071435586,
0.042750461, 0.028767831, 0.020868348, 0.011856207,
0.006909831]
NGA2["M66ssLo50"] = [0.040622723, 0.040963868, 0.043831131, 0.05231201,
0.065605698, 0.077101819, 0.091977953, 0.095123949,
0.092540383, 0.08714561, 0.075095772, 0.064479136,
0.044790977, 0.03285474, 0.020391872, 0.014108398,
0.008439767, 0.005179811, 0.003248868, 0.00133351,
0.000685985]
NGA2["M66ssHi50"] = [0.084550876, 0.085260925, 0.091228756, 0.108880595,
0.136549666, 0.160477335, 0.191440059, 0.197988036,
0.192610682, 0.181382169, 0.15630201, 0.134204873,
0.093226551, 0.068382836, 0.042443011, 0.029364783,
0.017566268, 0.012096956, 0.00949278, 0.005997868,
0.003578109]
NGA2["M66rvLo20"] = [0.118593137, 0.120081238, 0.128933938, 0.152436184,
0.192932295, 0.228674723, 0.273098834, 0.282332092,
0.272745768, 0.253543599, 0.214588317, 0.181363137,
0.121740662, 0.089205456, 0.053477771, 0.036178557,
0.021099953, 0.013741723, 0.008608295, 0.003434744,
0.001748962]
NGA2["M66rvHi20"] = [0.246836078, 0.249933363, 0.268359099, 0.317275946,
0.401563294, 0.475956476, 0.568419443, 0.587637258,
0.567684581, 0.527717782, 0.446637466, 0.377483608,
0.253387237, 0.185669469, 0.111306973, 0.076091361,
0.045354516, 0.029474332, 0.020705292, 0.011856207,
0.006909831]
NGA2["M66rvLo50"] = [0.044454341, 0.044812694, 0.047826901, 0.056670166,
0.071372107, 0.084454269, 0.101120084, 0.10489642,
0.102773514, 0.097305128, 0.083444492, 0.071212325,
0.049044609, 0.035725784, 0.021893463, 0.014850093,
0.008634888, 0.005565883, 0.003402669, 0.001360002,
0.000693088]
NGA2["M66rvHi50"] = [0.092525887, 0.093271751, 0.099545428, 0.117951525,
0.14855169, 0.175780495, 0.210468207, 0.218328154,
0.213909604, 0.202527875, 0.173678777, 0.148219125,
0.102079928, 0.074358539, 0.045568376, 0.030908523,
0.017972388, 0.011947341, 0.00949278, 0.005997868,
0.003578109]
NGA2["number_of_models"] = 5
NGA2["lines"] = {}
# ASK14
NGA2["lines"][0] = {}
NGA2["lines"][0]['M55rv20'] = [0.078294115, 0.079933732, 0.083837743,
0.098060083, 0.126178875, 0.15302637,
0.18536572, 0.189314042, 0.170859393,
0.142992888, 0.105723299, 0.082809907,
0.04522314, 0.029595103, 0.016841282,
0.010544547, 0.004818515, 0.002752209,
0.00174853, 0.000782269, 0.00040493]
NGA2["lines"][0]['M55rv50'] = [0.023116427, 0.023570773, 0.024758022,
0.028927026, 0.036706894, 0.043886896,
0.052429419, 0.05381973, 0.049571615,
0.042750134, 0.033663115, 0.027251952,
0.015198342, 0.009946158, 0.005711094,
0.003608118, 0.001640174, 0.000927846,
0.000591064, 0.000286921, 0.000159]
NGA2["lines"][0]['M62ss20'] = [0.12254422, 0.124942656, 0.130297048,
0.150847868, 0.192403884, 0.232822794,
0.291700754, 0.311826583, 0.293903871,
0.254214065, 0.197217481, 0.161600864,
0.100232319, 0.070115276, 0.042661843,
0.028446373, 0.014969541, 0.009484945,
0.006573112, 0.003424394, 0.002003189]
NGA2["lines"][0]['M62ss50'] = [0.043199535, 0.044021362, 0.046115234,
0.053618891, 0.067755339, 0.080934191,
0.099637542, 0.106130917, 0.10131549,
0.090302329, 0.074611425, 0.063188104,
0.040023919, 0.027997837, 0.017189375,
0.011565277, 0.006054266, 0.003799318,
0.002640029, 0.001492334, 0.000934576]
NGA2["lines"][0]['M66ss20'] = [0.134751814, 0.137339117, 0.143003339,
0.165108565, 0.210117387, 0.254115854,
0.32259563, 0.351007869, 0.336749551,
0.298874402, 0.241037813, 0.204093093,
0.137460612, 0.100804946, 0.064707146,
0.045155289, 0.025925427, 0.017512459,
0.012800371, 0.007312398, 0.004598762]
NGA2["lines"][0]['M66ss50'] = [0.052450368, 0.053433073, 0.055906419,
0.064860422, 0.081808189, 0.09768103,
0.121872698, 0.132006081, 0.128104399,
0.117158707, 0.100630952, 0.088065626,
0.060572651, 0.044420163, 0.028771281,
0.020259291, 0.011570854, 0.007741131,
0.005673444, 0.003516647, 0.002367664]
NGA2["lines"][0]['M66rv20'] = [0.158173921, 0.161099414, 0.167252722,
0.192124993, 0.243485169, 0.294175301,
0.375030227, 0.411292226, 0.397160104,
0.348808626, 0.276705516, 0.231499156,
0.15222161, 0.109802953, 0.068914848,
0.047304594, 0.026555164, 0.017644296,
0.012723799, 0.007106937, 0.004396405]
NGA2["lines"][0]['M66rv50'] = [0.061740235, 0.062879229, 0.065709854,
0.076067806, 0.09576868, 0.114305309,
0.142885886, 0.155271435, 0.151085446,
0.136732913, 0.115521873, 0.09989127,
0.067077152, 0.048385176, 0.030642187,
0.021223594, 0.011851914, 0.007799408,
0.005639505, 0.003417837, 0.002263481]
# BSSA14
NGA2["lines"][1] = {}
NGA2["lines"][1]['M55rv20'] = [0.103200624, 0.102744349, 0.106384231,
0.127012869, 0.16894211, 0.201087789,
0.195724879, 0.167304181, 0.14318879,
0.124293989, 0.095697641, 0.075777356,
0.043214298, 0.027455774, 0.012787612,
0.007159036, 0.003237635, 0.001912125,
0.001291884, 0.000650001, 0.000376129]
NGA2["lines"][1]['M55rv50'] = [0.034181966, 0.033793532, 0.03461163,
0.040575852, 0.054347581, 0.066151112,
0.066635878, 0.058275829, 0.050645503,
0.04437217, 0.034384001, 0.027206581,
0.015506014, 0.009834329, 0.004629075,
0.002604303, 0.001180174, 0.00070342,
0.000478922, 0.000245534, 0.000137261]
NGA2["lines"][1]['M62ss20'] = [0.13467938, 0.133956003, 0.140430971,
0.168447169, 0.214530628, 0.258475285,
0.325619217, 0.343104753, 0.326252833,
0.302551523, 0.25118075, 0.210675662,
0.142745557, 0.102715435, 0.056535719,
0.035802956, 0.019596719, 0.012789451,
0.00908641, 0.004569246, 0.002508654]
NGA2["lines"][1]['M62ss50'] = [0.052287691, 0.051591468, 0.053477078,
0.062987096, 0.080536509, 0.098547992,
0.126932764, 0.135983768, 0.130806181,
0.12200603, 0.101387145, 0.08470802,
0.057019487, 0.040750863, 0.022564692,
0.01434802, 0.007866971, 0.005189683,
0.003714387, 0.001919458, 0.001034816]
NGA2["lines"][1]['M66ss20'] = [0.158202521, 0.157016139, 0.164250788,
0.195263539, 0.245322121, 0.291565201,
0.361733966, 0.380825687, 0.364318432,
0.340638705, 0.288018612, 0.24543194,
0.169868986, 0.124638055, 0.072676253,
0.048107491, 0.028360548, 0.019663579,
0.014629002, 0.007997478, 0.004637106]
NGA2["lines"][1]['M66ss50'] = [0.065830716, 0.064767755, 0.066963933,
0.078167612, 0.098455968, 0.118412809,
0.149218706, 0.15916656, 0.153698493,
0.144237433, 0.121660064, 0.103063318,
0.070602848, 0.051293771, 0.030015516,
0.019943534, 0.011778954, 0.008265977,
0.006196372, 0.003502493, 0.002011662]
NGA2["lines"][1]['M66rv20'] = [0.159344454, 0.157718647, 0.162355138,
0.189190321, 0.242522634, 0.296904644,
0.370448157, 0.389222271, 0.380316204,
0.36348208, 0.310189694, 0.261080301,
0.176113764, 0.128816124, 0.073937517,
0.047408466, 0.02610192, 0.017157795,
0.012406363, 0.00680793, 0.00386682]
NGA2["lines"][1]['M66rv50'] = [0.064379508, 0.063145127, 0.064226388,
0.073496755, 0.094502471, 0.117099992,
0.148381471, 0.157919907, 0.155700018,
0.149284678, 0.126970796, 0.106171415,
0.070809869, 0.051245156, 0.029515414,
0.019000378, 0.010484792, 0.006981404,
0.005089691, 0.002895511, 0.001628814]
# CB14
NGA2["lines"][2] = {}
NGA2["lines"][2]['M55rv20'] = [0.099467234, 0.101323627, 0.112183161,
0.139516188, 0.179276049, 0.208794488,
0.236334324, 0.227367831, 0.213690186,
0.202389696, 0.153006746, 0.11845181,
0.072025456, 0.046990665, 0.021799652,
0.012543499, 0.005602452, 0.003326331,
0.002250521, 0.000709564, 0.000395457]
NGA2["lines"][2]['M55rv50'] = [0.030038712, 0.030372282, 0.033231082,
0.040504836, 0.051837305, 0.06186335,
0.071990348, 0.07070516, 0.068385459,
0.066245509, 0.050155629, 0.039160646,
0.024687707, 0.0158577, 0.007598911,
0.004268376, 0.001906827, 0.001148711,
0.000762106, 0.000241396, 0.000132217]
NGA2["lines"][2]['M62ss20'] = [0.126712063, 0.128594278, 0.140707067,
0.177343818, 0.223477154, 0.257772028,
0.296672511, 0.295080756, 0.286191632,
0.272175215, 0.236372555, 0.196634904,
0.130312505, 0.094626698, 0.053451624,
0.036335715, 0.019847707, 0.013087038,
0.008906782, 0.003399929, 0.001909228]
NGA2["lines"][2]['M62ss50'] = [0.044746468, 0.045077284, 0.048775413,
0.060180485, 0.07494826, 0.087607432,
0.101702842, 0.102917705, 0.103097554,
0.100565991, 0.086710007, 0.072800271,
0.050125454, 0.035454563, 0.020542778,
0.013617682, 0.007393407, 0.004899792,
0.003267479, 0.001275688, 0.000712612]
NGA2["lines"][2]['M66ss20'] = [0.170529033, 0.173156473, 0.190278723,
0.240216224, 0.297057125, 0.335958226,
0.383154505, 0.383614074, 0.376055962,
0.359316016, 0.325440768, 0.275101871,
0.186669633, 0.141931358, 0.087252227,
0.062117901, 0.037174314, 0.024983154,
0.01722056, 0.007298294, 0.004212382]
NGA2["lines"][2]['M66ss50'] = [0.066252701, 0.066792831, 0.072683506,
0.090029016, 0.109713088, 0.124963706,
0.141969518, 0.143972638, 0.145720381,
0.14273755, 0.127325833, 0.108657518,
0.076694039, 0.05645426, 0.035456949,
0.024600487, 0.014580682, 0.009795704,
0.006613051, 0.002896024, 0.001674337]
NGA2["lines"][2]['M66rv20'] = [0.184351996, 0.18683454, 0.205315799,
0.254529085, 0.315706139, 0.360349661,
0.418206913, 0.424914844, 0.420244062,
0.406891303, 0.362603954, 0.304482679,
0.207383839, 0.157918303, 0.095967737,
0.066166401, 0.03943871, 0.025629854,
0.017367883, 0.007390299, 0.004318174]
NGA2["lines"][2]['M66rv50'] = [0.071860073, 0.072314062, 0.07873705,
0.095922947, 0.117345906, 0.134902018,
0.155821373, 0.1601446, 0.163311017,
0.16191583, 0.141874566, 0.120262113,
0.085204561, 0.062813187, 0.038998697,
0.02620381, 0.015468834, 0.01004927,
0.006669627, 0.002932532, 0.001716387]
# CY14
NGA2["lines"][3] = {}
NGA2["lines"][3]['M55rv20'] = [0.066305558, 0.067111697, 0.074179982,
0.095036673, 0.123416615, 0.142644764,
0.161400945, 0.164342848, 0.15810573,
0.146858551, 0.120613076, 0.097424616,
0.057295277, 0.036281706, 0.017756064,
0.010310636, 0.004418275, 0.002204635,
0.001222029, 0.000410596, 0.000185112]
NGA2["lines"][3]['M55rv50'] = [0.018463001, 0.018654351, 0.020330036,
0.025421867, 0.032314601, 0.037179445,
0.042701719, 0.044685568, 0.043863961,
0.041463067, 0.034992095, 0.028830157,
0.017476736, 0.011301324, 0.005709217,
0.003361906, 0.001469295, 0.000737549,
0.000409857, 0.000138043, 0.0000623338]
NGA2["lines"][3]['M62ss20'] = [0.121311405, 0.122727896, 0.135085752,
0.172257645, 0.221919125, 0.255093084,
0.286281106, 0.290477398, 0.277740593,
0.257882207, 0.217366154, 0.184302226,
0.123950547, 0.087317942, 0.050129077,
0.032714089, 0.015885821, 0.00865157,
0.005104797, 0.001919442, 0.000934254]
NGA2["lines"][3]['M62ss50'] = [0.040277237, 0.040661168, 0.044288779,
0.055645416, 0.070569967, 0.080455826,
0.089885224, 0.092037335, 0.088737939,
0.083155651, 0.071053618, 0.060923361,
0.041780535, 0.029927567, 0.017734921,
0.011771782, 0.005844054, 0.003200034,
0.001892218, 0.000712843, 0.000347322]
NGA2["lines"][3]['M66ss20'] = [0.139835103, 0.141296652, 0.154613881,
0.195910158, 0.25217984, 0.290792777,
0.328359244, 0.335055139, 0.322646636,
0.30178809, 0.25773635, 0.221182661,
0.153114203, 0.110698168, 0.066727504,
0.04580305, 0.025733845, 0.015447091,
                               0.009669647, 0.003962163, 0.002036513]
NGA2["lines"][3]['M66ss50'] = [0.05117123, 0.051603313, 0.055986047,
0.070134114, 0.089039842, 0.101850955,
0.114029755, 0.116641223, 0.112678949,
0.105927347, 0.091002104, 0.078529457,
0.055097009, 0.040415029, 0.025138813,
0.017568777, 0.010100783, 0.006093895,
0.003822198, 0.001568836, 0.000807041]
NGA2["lines"][3]['M66rv20'] = [0.165222287, 0.167018995, 0.182744763,
0.231018774, 0.29636094, 0.341259538,
0.385374866, 0.393711481, 0.380085741,
0.356317691, 0.305152999, 0.262273674,
0.181926993, 0.131646982, 0.079392413,
0.054463201, 0.028971512, 0.016598209,
0.010127406, 0.004040875, 0.002057602]
NGA2["lines"][3]['M66rv50'] = [0.060589968, 0.061116781, 0.06630675,
0.082964517, 0.105147907, 0.120204501,
0.134588955, 0.137751276, 0.133316774,
0.125538394, 0.108056134, 0.093331271,
0.065550206, 0.048103527, 0.029926933,
0.020902627, 0.011373672, 0.006548098,
0.00400314, 0.001600002, 0.000815398]
# I14
NGA2["lines"][4] = {}
NGA2["lines"][4]['M55rv20'] = [0.087697354, 0.090097438, 0.09887849,
0.110084449, 0.134575561, 0.160928311,
0.180334348, 0.178941422, 0.163544512,
0.147204058, 0.114344701, 0.088073397,
0.052658956, 0.035432214, 0.017176806,
0.01008293, 0.004941357, 0.002730032,
0.001818643, 0.000743167, 0.000404006]
NGA2["lines"][4]['M55rv50'] = [0.024284319, 0.024948927, 0.027380493,
0.031602213, 0.038624648, 0.045673795,
0.05337671, 0.052649613, 0.048957336,
0.044944079, 0.03577697, 0.027840141,
0.017806012, 0.012071148, 0.006100439,
0.003627924, 0.001733859, 0.001022914,
0.00068694, 0.000308514, 0.000171768]
NGA2["lines"][4]['M62ss20'] = [0.129532319, 0.133077334, 0.146047282,
0.161737955, 0.201114312, 0.243558186,
0.28287894, 0.281707508, 0.261615167,
0.237527215, 0.192930277, 0.158476514,
0.099545998, 0.071094104, 0.039852915,
0.02604251, 0.015721297, 0.010398963,
0.007866661, 0.003986387, 0.002253587]
NGA2["lines"][4]['M62ss50'] = [0.040078316, 0.041175172, 0.045188175,
0.052210254, 0.065207191, 0.078100674,
0.093323535, 0.092353303, 0.087137148,
0.08102047, 0.067551194, 0.05610959,
0.037802688, 0.027209137, 0.015915124,
0.010553121, 0.00619437, 0.004401866,
0.003361288, 0.001876756, 0.00108623]
NGA2["lines"][4]['M66ss20'] = [0.173892963, 0.178652031, 0.196063768,
0.214838218, 0.271061525, 0.329414349,
0.388568754, 0.385359276, 0.359681877,
0.326257713, 0.270727258, 0.229784598,
0.148185133, 0.108582609, 0.064899461,
0.044726919, 0.030520137, 0.022095423,
0.018004602, 0.010309745, 0.006008548]
NGA2["lines"][4]['M66ss50'] = [0.057325989, 0.058894876, 0.064634872,
0.074160222, 0.094227991, 0.113263563,
0.13639001, 0.134387743, 0.127335535,
0.118561659, 0.101082536, 0.086802729,
0.060132058, 0.044413628, 0.027713707,
0.019398494, 0.012848775, 0.010028148,
0.008254591, 0.005215538, 0.003111399]
NGA2["lines"][4]['M66rv20'] = [0.188375998, 0.193531435, 0.212393344,
0.232731463, 0.293637445, 0.356850304,
0.420931506, 0.41745472, 0.389638726,
0.353430762, 0.293275338, 0.248922683,
0.160527038, 0.115296983, 0.067548058,
0.045630463, 0.031136684, 0.022095423,
0.018004602, 0.010309745, 0.006008548]
NGA2["lines"][4]['M66rv50'] = [0.062100503, 0.063800057, 0.070018121,
0.08033681, 0.102075964, 0.122696953,
0.147749534, 0.145580504, 0.137940939,
0.128436312, 0.109501404, 0.094032274,
0.065140281, 0.047160013, 0.028844724,
0.01979037, 0.013108337, 0.010028148,
0.008254591, 0.005215538, 0.003111399]
# Averages
NGA2["lines"]["avg"] = {}
NGA2["lines"]["avg"]['M55rv20'] = [0.086992977, 0.088242169, 0.095092722,
0.113942052, 0.146477842, 0.173296344,
0.191832043, 0.185454065, 0.169877722,
0.152747836, 0.117877093, 0.092507417,
0.054083425, 0.035151092, 0.017272283,
0.01012813, 0.004603647, 0.002585067,
0.001666321, 0.000659119, 0.000353127]
NGA2["lines"]["avg"]['M55rv50'] = [0.026016885, 0.026267973, 0.028062253,
0.033406359, 0.042766206, 0.05095092,
0.057426815, 0.05602718, 0.052284775,
0.047954992, 0.037794362, 0.030057895,
0.018134962, 0.011802132, 0.005949747,
0.003494125, 0.001586066, 0.000908088,
0.000585778, 0.000244082, 0.000132516]
NGA2["lines"]["avg"]['M62ss20'] = [0.126955878, 0.128659633, 0.138513624,
0.166126891, 0.210689021, 0.249544276,
0.296630506, 0.3044394, 0.289140819,
0.264870045, 0.219013444, 0.182338034,
0.119357385, 0.085173891, 0.048526236,
0.031868329, 0.017204217, 0.010882393,
0.007507552, 0.00345988, 0.001921782]
NGA2["lines"]["avg"]['M62ss50'] = [0.044117849, 0.044505291, 0.047568936,
0.056928429, 0.071803453, 0.085129223,
0.102296381, 0.105884605, 0.102218862,
0.095410094, 0.080262678, 0.067545869,
0.045350417, 0.032267993, 0.018789378,
0.012371176, 0.006670614, 0.004298139,
0.00297508, 0.001455416, 0.000823111]
NGA2["lines"]["avg"]['M66ss20'] = [0.155442287, 0.157492082, 0.1696421,
0.202267341, 0.2551476, 0.300369281,
0.35688242, 0.367172409, 0.351890492,
0.325374985, 0.27659216, 0.235118832,
0.159059713, 0.117331027, 0.071252518,
0.04918213, 0.029542854, 0.019940341,
0.014464836, 0.007376016, 0.004298662]
NGA2["lines"]["avg"]['M66ss50'] = [0.058606201, 0.05909837, 0.063234955,
0.075470277, 0.094649016, 0.111234412,
0.132696137, 0.137234849, 0.133507551,
0.125724539, 0.108340298, 0.09302373,
0.064619721, 0.04739937, 0.029419253,
0.020354116, 0.012176009, 0.008384971,
0.006111931, 0.003339907, 0.00199442]
NGA2["lines"]["avg"]['M66rv20'] = [0.171093731, 0.173240606, 0.186012353,
0.219918927, 0.278342465, 0.329907889,
0.393998334, 0.407319109, 0.393488967,
0.365786092, 0.3095855, 0.261651699,
0.175634649, 0.128696269, 0.077152115,
0.052194625, 0.030440798, 0.019825115,
0.014126011, 0.007131157, 0.00412951]
NGA2["lines"]["avg"]['M66rv50'] = [0.064134057, 0.064651051, 0.068999633,
0.081757767, 0.102968185, 0.121841755,
0.145885444, 0.151333544, 0.148270839,
0.140381625, 0.120384955, 0.102737668,
0.070756414, 0.051541412, 0.031585591,
0.021424156, 0.01245751, 0.008281265,
0.005931311, 0.003212284, 0.001907096]
NGA2["labels"] = ['ASK14', 'BSSA14', 'CB14', 'CY14', 'I14']
NGA2["acptThin"] = [3, 4, 5, 7.5, 10]
# 9 line colors in 'excel' colormap
RGB_XLS = [[0.96863, 0.58824, 0.27451],
[0.50196, 0.39216, 0.77647],
[0.60784, 0.73333, 0.34902],
[0.75294, 0.31373, 0.30196],
[0.58039, 0.54118, 0.32941],
[0.29412, 0.67451, 0.77647],
[0.30980, 0.50588, 0.74118],
[0.12157, 0.28627, 0.49020],
[0.45, 0.45, 0.45]]
COLOR_NGA = [0, 0, 0]
# --------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------
def create_plot(codebase, mag, vel, dist, mech, sim_data, output_file):
"""
This function creates a GMPE comparison plot
Code was migrated from the Matlab script written by Ronnie.
"""
# Set method name
if codebase == 'gp':
method_name = "Graves & Pitarka"
elif codebase == 'sdsu':
method_name = "SDSU"
elif codebase == 'ucsb':
method_name = "UCSB"
elif codebase == 'exsim':
method_name = "ExSIM"
elif codebase == 'csm':
method_name = "CSM"
elif codebase == 'irikura_recipe_m1':
method_name = "Irikura Recipe M1"
elif codebase == 'irikura_recipe_m2':
method_name = "Irikura Recipe M2"
elif codebase == 'song':
method_name = "Song"
else:
method_name = ""
plot_title = ("%s, Scenario: M%.1f, %s, R=%d km, %s" %
(method_name, mag, mech, dist, vel))
# Select model and lines, use NGA-W2
nga = NGA2
nga_lines = []
nga_labels = nga["labels"]
nga_periods = nga["Tnga"]
nga_ave_periods = nga["Tave"]
nga_acpt_thin = nga["acptThin"]
if mech == "REV" and mag == 5.5 and dist == 20:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M55rvLo20"]
acpt_hi = nga["M55rvHi20"]
nga_ave = nga["lines"]["avg"]["M55rv20"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M55rv20"])
elif mech == "REV" and mag == 5.5 and dist == 50:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M55rvLo50"]
acpt_hi = nga["M55rvHi50"]
nga_ave = nga["lines"]["avg"]["M55rv50"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M55rv50"])
elif mech == "SS" and mag == 6.2 and dist == 20:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M62ssLo20"]
acpt_hi = nga["M62ssHi20"]
nga_ave = nga["lines"]["avg"]["M62ss20"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M62ss20"])
elif mech == "SS" and mag == 6.2 and dist == 50:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M62ssLo50"]
acpt_hi = nga["M62ssHi50"]
nga_ave = nga["lines"]["avg"]["M62ss50"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M62ss50"])
elif mech == "SS" and mag == 6.6 and dist == 20:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M66ssLo20"]
acpt_hi = nga["M66ssHi20"]
nga_ave = nga["lines"]["avg"]["M66ss20"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M66ss20"])
elif mech == "SS" and mag == 6.6 and dist == 50:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M66ssLo50"]
acpt_hi = nga["M66ssHi50"]
nga_ave = nga["lines"]["avg"]["M66ss50"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M66ss50"])
elif mech == "REV" and mag == 6.6 and dist == 20:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M66rvLo20"]
acpt_hi = nga["M66rvHi20"]
nga_ave = nga["lines"]["avg"]["M66rv20"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M66rv20"])
elif mech == "REV" and mag == 6.6 and dist == 50:
acpt_x = nga["Tacpt"]
acpt_lo = nga["M66rvLo50"]
acpt_hi = nga["M66rvHi50"]
nga_ave = nga["lines"]["avg"]["M66rv50"]
for line in range(0, nga["number_of_models"]):
nga_lines.append(nga["lines"][line]["M66rv50"])
    # Compute the geometric mean of sim_data for each period
sim_mean = []
for data in sim_data:
sim_mean.append(np.exp(np.mean(np.log(data))))
box_positions = PERIODS
box_width = 1.5*10.**(np.log10(PERIODS)-1)
# y axis
ymin = 0.00006
ymax = 4.0
# Initialize plot
fig, _ = plt.subplots()
fig.clf()
# Array for labels
l_handlers = []
    labels = list(nga_labels)  # copy so repeated calls do not mutate NGA2["labels"]
labels.append("Mean NGA Models")
labels.append("Acceptance Criteria")
# Plot NGA lines and average
for line in range(0, len(nga_lines)):
l_handlers.append(plt.plot(nga_periods, nga_lines[line],
color=RGB_XLS[line], lw=1.8)[0])
l_handlers.append(plt.plot(nga_ave_periods, nga_ave,
color=COLOR_NGA, lw=1.8)[0])
# Plot acceptance criteria
l_handlers.append(plt.plot(acpt_x, acpt_hi[0:len(acpt_x)],
color=COLOR_NGA, lw=2, ls='--')[0])
plt.plot(acpt_x, acpt_lo[0:len(acpt_x)], color=COLOR_NGA, lw=2, ls='--')
if len(nga_acpt_thin):
plt.plot(nga_acpt_thin,
acpt_hi[len(acpt_hi)-len(nga_acpt_thin):len(acpt_hi)],
color=COLOR_NGA, lw=0.8, ls='--')
plt.plot(nga_acpt_thin,
acpt_lo[len(acpt_lo)-len(nga_acpt_thin):len(acpt_lo)],
color=COLOR_NGA, lw=0.8, ls='--')
# Now plot the data
plt.boxplot(sim_data, positions=box_positions,
widths=box_width, whis='range',
boxprops=dict(linewidth=1.5),
whiskerprops=dict(linewidth=1.1,
linestyle='-',
color='k'))
plt.scatter(box_positions, sim_mean, s=50, color='red', marker='o')
plt.legend(l_handlers, labels, loc='lower left')
plt.xscale('log')
plt.yscale('log')
plt.ylabel('PSA (g)')
plt.xlabel('Period (sec)')
plt.title(plot_title)
plt.axis([0.01, 20, ymin, ymax])
plt.xticks([0.01, 0.1, 1, 10], ["0.01", "0.1", "1", "10"])
plt.grid(True)
plt.grid(b=True, which='major', linestyle='-', color='lightgray')
plt.grid(b=True, which='minor', linewidth=0.5, color='gray')
plt.minorticks_on()
# All done! Save plot!
pylab.gcf().set_size_inches(10, 7.5)
pylab.savefig(output_file, format="png", dpi=plot_config.dpi)
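# A minimal, hypothetical helper (not used by the original pipeline) showing
# the per-period statistic plotted above: sim_mean is a geometric mean, i.e.
# exp of the mean of logs, which suits roughly log-normal PSA values.
# Assumes numpy is imported as np at module level, as it is used throughout
# this script.
def geometric_mean(values):
    """Geometric mean of positive values, as computed for sim_mean above."""
    return np.exp(np.mean(np.log(values)))
# e.g. geometric_mean([0.1, 0.2, 0.4]) == 0.2 (up to floating point error)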
def load_sim_data(input_dir):
"""
This function reads all simulation data from the rd50 files
"""
# Create empty arrays for simulation data
sim_data = []
for _ in PERIODS:
sim_data.append([])
realizations = sorted(os.listdir(input_dir))
for realization in realizations:
basedir = os.path.join(input_dir, realization)
stations = sorted(glob.glob("%s%s*.rd50" % (basedir, os.sep)))
for station in stations:
input_file = open(station, 'r')
# Start with the first period
cur_period_idx = 0
for line in input_file:
cur_period = PERIODS[cur_period_idx]
line = line.strip()
# Skip comments
if line.startswith("#"):
continue
pieces = [float(item) for item in line.split()]
# Check if we want this period
if pieces[0] == cur_period:
sim_data[cur_period_idx].append(pieces[3])
cur_period_idx = cur_period_idx + 1
# Done with this file
input_file.close()
# Return simulation data
return sim_data
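# Note on the expected input format (an assumption based on the parser above):
# each .rd50 file holds whitespace-separated rows, comment lines start with '#',
# the first column is the period and the fourth column (pieces[3]) is the value
# collected into sim_data. A hypothetical snippet:
#
#   # period   col-2   col-3   value     <- header names are illustrative only
#   0.010      0.11    0.12    0.13
#   0.020      0.21    0.22    0.23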
# --------------------------------------------------------------------------
# Main
# --------------------------------------------------------------------------
def run():
"""
Main part of the code
"""
parser = optparse.OptionParser()
parser.add_option("-d", "--dir", dest="input_dir",
help="Input directory containing simulation results")
parser.add_option("-o", "--output", dest="output_dir",
help="Output directory for the plots")
parser.add_option("-c", "--codebase", dest="codebase",
help="Codebase used in the simulation")
parser.add_option("-m", "--mag", type="float", dest="mag",
help="Event Magnitude")
parser.add_option("--dist", type="int", dest="dist",
help="Distance of the stations in km")
parser.add_option("--mech", dest="mech",
help="Fault mechanism")
parser.add_option("--vel", dest="vel",
help="Velocity model")
(options, _) = parser.parse_args()
if options.input_dir is None:
parser.error("Please specify the input directory!")
top_input_dir = options.input_dir
if not os.path.isdir(top_input_dir):
parser.error("Invalid input directory!")
dirs = os.listdir(top_input_dir)
if not "Sims" in dirs:
parser.error("Please provide the top-level simulation directory!\n"
"This is the directory given to the cluster script")
input_outdir = os.path.join(top_input_dir, "Sims", "outdata")
# Validate codebase to use
codebase = options.codebase
if codebase is None:
print("Please specify a codebase!")
sys.exit(1)
codebase = codebase.lower()
if codebase not in CODEBASES:
print("Codebase needs to be one of: %s" % (CODEBASES))
sys.exit(1)
if options.mag is None:
parser.error("Please specify magnitude!")
mag = float(options.mag)
if options.mech is None:
parser.error("Please specify fault mechanism!")
mech = options.mech.upper()
if options.dist is None:
parser.error("Please specify station distance!")
dist = int(options.dist)
if options.vel is None:
parser.error("Please specify velocity model!")
vel = options.vel.upper()
# Default is to use NGA-WEST2
model_short = "w2"
# Output filename
r_output_file = ("boxplot_%s-gmpe%d%s%d-%s-%s.png" % (codebase,
int(mag * 10),
mech,
dist,
vel,
model_short))
if options.output_dir is None:
output_file = r_output_file
else:
output_file = os.path.join(options.output_dir, r_output_file)
# Load simulation data
sim_data = load_sim_data(input_outdir)
# Create plot
create_plot(codebase, mag, vel, dist, mech, sim_data, output_file)
print("All Done!")
### Main
run()
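# Example invocation (illustrative only; the script name, paths, codebase and
# velocity-model values below are placeholders, the accepted options are the
# ones defined in run() above):
#
#   python boxplot_script.py -d /path/to/top_level_dir -o plots \
#       -c <codebase> -m 6.6 --dist 20 --mech SS --vel <velocity_model>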
| apache-2.0 |
kbg/drms | examples/plot_aia_ligthcurve.py | 1 | 2154 | from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import example_helpers
import drms
# Series name, timespan and wavelength
series = 'aia.lev1_euv_12s'
series_lev1 = 'aia.lev1'
wavelen = 335
#tsel = '2015-01-01T00:00:01Z/1h'
#tsel = '2015-01-01T00:00:01Z/1d'
#tsel = '2015-01-01T00:00:01Z/1d@60s'
#tsel = '2015-01-01T00:00:01Z/7d@1h'
#tsel = '2015-01-01T00:00:01Z/30d@6h'
#tsel = '2015-01-01T00:00:01Z/100d@1d'
tsel = '2014-01-01T00:00:01Z/365d@1d'
# DRMS query string
qstr = '%s[%s][%d]' % (series, tsel, wavelen)
# Some keywords we are interested in; you can use c.keys(series) to get a
# list of all available keywords of a series.
keys = ['T_REC', 'T_OBS', 'DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS',
'DATASKEW', 'DATAKURT', 'QUALITY']
# Create DRMS client, uses JSOC baseurl by default, set debug=True to see the
# DRMS query URLs.
c = drms.Client(debug=False)
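# As noted above, the complete keyword list of a series can be inspected with
# the client, for example:
# print(c.keys(series))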
# Get detailed information about the series. Some keywords from
# aia.lev1_euv_12s are links to keywords in aia.lev1 and unfortunately some
# entries (like note) are missing for linked keywords, so we are using the
# entries from aia.lev1 in this case.
print('Querying series info...')
si = c.info(series)
si_lev1 = c.info(series_lev1)
for k in keys:
linkinfo = si.keywords.loc[k].linkinfo
if linkinfo is not None and linkinfo.startswith('lev1->'):
note_str = si_lev1.keywords.loc[k].note
else:
note_str = si.keywords.loc[k].note
print('%10s : %s' % (k, note_str))
# Get keyword values for the selected timespan and wavelength
print('Querying keyword data...\n -> %s' % qstr)
res = c.query(qstr, key=keys)
print(' -> %d lines retrieved.' % len(res))
# Only use entries with QUALITY==0
res = res[res.QUALITY == 0]
print(' -> %d lines after QUALITY selection.' % len(res))
# Convert T_REC strings to datetime and use it as index for the series
res.index = drms.to_datetime(res.T_REC)
# Create some simple plots
ax = res[['DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS', 'DATASKEW']].plot(
figsize=(8, 10), subplots=True)
ax[0].set_title(qstr, fontsize='medium')
plt.tight_layout()
plt.show()
| mit |
NicWayand/xray | xarray/core/coordinates.py | 1 | 8137 | from collections import Mapping
from contextlib import contextmanager
import pandas as pd
from . import formatting
from .utils import Frozen
from .merge import merge_coords, merge_coords_without_align
from .pycompat import iteritems, basestring, OrderedDict
from .variable import default_index_coordinate
class AbstractCoordinates(Mapping, formatting.ReprMixin):
def __getitem__(self, key):
if (key in self._names or
(isinstance(key, basestring) and
key.split('.')[0] in self._names)):
# allow indexing current coordinates or components
return self._data[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.update({key: value})
@property
def indexes(self):
return self._data.indexes
@property
def variables(self):
raise NotImplementedError
def _update_coords(self, coords):
raise NotImplementedError
def __iter__(self):
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self):
return len(self._names)
def __contains__(self, key):
return key in self._names
def __unicode__(self):
return formatting.coords_repr(self)
@property
def dims(self):
return self._data.dims
def to_index(self, ordered_dims=None):
"""Convert all index coordinates into a :py:class:`pandas.MultiIndex`
"""
if ordered_dims is None:
ordered_dims = self.dims
indexes = [self.variables[k].to_index() for k in ordered_dims]
return pd.MultiIndex.from_product(indexes, names=list(ordered_dims))
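    # Illustrative sketch of to_index() (hypothetical coordinate values):
    #
    #   ds = xarray.Dataset(coords={'x': [1, 2], 'y': ['a', 'b']})
    #   ds.coords.to_index()
    #   # -> pandas.MultiIndex.from_product([[1, 2], ['a', 'b']],
    #   #                                    names=['x', 'y'])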
def update(self, other):
other_vars = getattr(other, 'variables', other)
coords = merge_coords([self.variables, other_vars],
priority_arg=1, indexes=self.indexes,
indexes_from_arg=0)
self._update_coords(coords)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = merge_coords_without_align(
[self.variables, other.variables])
return variables
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in priority_vars, because we didn't align
# first
priority_vars = OrderedDict(
(k, v) for k, v in self.variables.items() if k not in self.dims)
variables = merge_coords_without_align(
[self.variables, other.variables], priority_vars=priority_vars)
yield
self._update_coords(variables)
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = merge_coords_without_align([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords))
class DatasetCoordinates(AbstractCoordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataset):
self._data = dataset
@property
def _names(self):
return self._data._coord_names
@property
def variables(self):
return Frozen(OrderedDict((k, v)
for k, v in self._data.variables.items()
if k in self._names))
def to_dataset(self):
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
for dim, size in dims.items():
if dim not in variables:
variables[dim] = default_index_coordinate(dim, size)
updated_coord_names = set(coords) | set(dims)
self._data._variables = variables
self._data._coord_names.update(updated_coord_names)
self._data._dims = dict(dims)
def __delitem__(self, key):
if key in self:
del self._data[key]
else:
raise KeyError(key)
class DataArrayCoordinates(AbstractCoordinates):
"""Dictionary like container for DataArray coordinates.
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataarray):
self._data = dataarray
@property
def _names(self):
return set(self._data._coords)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
dims = calculate_dimensions(coords)
if set(dims) != set(self.dims):
raise ValueError('cannot add coordinates with new dimensions to '
'a DataArray')
self._data._coords = coords
@property
def variables(self):
return Frozen(self._data._coords)
def _to_dataset(self, shallow_copy=True):
from .dataset import Dataset
coords = OrderedDict((k, v.copy(deep=False) if shallow_copy else v)
for k, v in self._data._coords.items())
return Dataset._from_vars_and_coord_names(coords, set(coords))
def to_dataset(self):
return self._to_dataset()
def __delitem__(self, key):
if key in self.dims:
raise ValueError('cannot delete a coordinate corresponding to a '
'DataArray dimension')
del self._data._coords[key]
class Indexes(Mapping, formatting.ReprMixin):
"""Ordered Mapping[str, pandas.Index] for xarray objects.
"""
def __init__(self, variables, dims):
"""Not for public consumption.
Arguments
---------
variables : OrderedDict
Reference to OrderedDict holding variable objects. Should be the
same dictionary used by the source object.
dims : sequence or mapping
Should be the same dimensions used by the source object.
"""
self._variables = variables
self._dims = dims
def __iter__(self):
return iter(self._dims)
def __len__(self):
return len(self._dims)
def __contains__(self, key):
return key in self._dims
def __getitem__(self, key):
if key in self:
return self._variables[key].to_index()
else:
raise KeyError(key)
def __unicode__(self):
return formatting.indexes_repr(self)
| apache-2.0 |
benslice/ggplot | ggplot/themes/theme_xkcd.py | 12 | 1530 | from copy import copy, deepcopy
from .theme import theme
import matplotlib.pyplot as plt
import matplotlib as mpl
class theme_xkcd(theme):
"""
xkcd theme
    The theme internally uses the settings from pyplot.xkcd().
@todo: testme.
"""
def __init__(self, scale=1, length=100, randomness=2):
super(theme_xkcd, self).__init__(complete=True)
with plt.xkcd(scale=scale, length=length, randomness=randomness):
_xkcd = mpl.rcParams.copy()
        # no need to get a deprecation warning for nothing...
for key in mpl._deprecated_map:
if key in _xkcd:
del _xkcd[key]
if 'tk.pythoninspect' in _xkcd:
del _xkcd['tk.pythoninspect']
self._rcParams.update(_xkcd)
def __deepcopy__(self, memo):
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
result.__dict__["_rcParams"] = {}
for k, v in self._rcParams.items():
try:
result.__dict__["_rcParams"][k] = deepcopy(v, memo)
except NotImplementedError:
                # deepcopy raises an error for objects that are derived from or
                # composed of matplotlib.transform.TransformNode.
                # Not desirable, but probably requires upstream fix.
                # In particular, XKCD uses matplotlib.patheffects.withStroke
# -gdowding
result.__dict__["_rcParams"][k] = copy(v)
return result
| bsd-2-clause |
mjirik/lisa | lisa/organ_segmentation.py | 1 | 91578 | # /usr/bin/env python
# -*- coding: utf-8 -*-
"""
LISA - organ segmentation tool.
Liver Surgery Analyser
python organ_segmentation.py
python organ_segmentation.py -mroi -vs 0.6
Author: Miroslav Jirik
Email: miroslav.jirik@gmail.com
"""
# from loguru import logger
# # logger = logging.getLogger()
# from loguru import logger.handlers
from loguru import logger
import sys
import os
import os.path as op
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(path_to_script, "../../imcut/"))
# from collections import namedtuple
# from scipy.io import loadmat, savemat
import scipy
import scipy.ndimage
import numpy as np
import scipy.sparse
import datetime
import argparse
import copy
import json
from . import json_decoder as jd
from . import exceptionProcessing
# from . import config_default
# the logger is already available here
# import dcmreaddata as dcmreader
# from imcut import pycut
# try:
# import imcut # noqa
# from imcut import pycut
# except:
# path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src"))
# logger.warning("Deprecated of pyseg_base as submodule")
# import traceback
# traceback.print_exc()
# import pycut
# from seg2fem import gen_mesh_from_voxels, gen_mesh_from_voxels_mc
# from viewer import QVTKViewer
from io3d import datareader
from io3d import datawriter
from io3d import misc
import io3d.cachefile as cachef
import io3d.misc
from . import data_plus
from . import support_structure_segmentation as sss
from . import config_default
from . import organ_seeds
from . import lisa_data
from . import data_manipulation
from . import qmisc
from . import config
from . import volumetry_evaluation
from . import segmentation_general
# import imtools.image_manipulation
import imma.image_manipulation as ima
import imma.labeled
import imma.segmentation_labels as imsl
from . import virtual_resection
# import audiosupport
# import skimage
# import skimage.transform
scaling_modes = {
'original': (None, None, None),
'double': (None, 'x2', 'x2'),
'3mm': (None, '3', '3')
}
# Default parameters for segmentation
# version comparison
# from pkg_resources import parse_version
import sklearn
# if parse_version(sklearn.__version__) > parse_version('0.10'):
# # new versions
# else:
# cvtype_name = 'cvtype'
cvtype_name = 'covariance_type'
default_segmodelparams = {
'type': 'gmmsame',
'params': {cvtype_name: 'full', 'n_components': 3}
}
config_version = [1, 0, 0]
def import_gui():
# from lisaWindow import OrganSegmentationWindow
# from PyQt4.QtGui import QApplication, QMainWindow, QWidget,\
# QGridLayout, QLabel, QPushButton, QFrame, \
# QFont, QPixmap
# from PyQt4.Qt import QString
pass
def printTotals(transferred, toBeTransferred):
print("Transferred: {0}\tOut of: {1}".format(transferred, toBeTransferred))
class OrganSegmentation():
"""
Main object of Lisa user interface.
"""
def set_params(self, *args, **kwargs):
"""
        Function sets parameters in the same way as the constructor does
:param args:
:param kwargs:
:return:
"""
self.__init__(*args, **kwargs)
def __init__(
self,
datapath=None,
working_voxelsize_mm=3,
viewermax=None,
viewermin=None,
series_number=None,
autocrop=True,
autocrop_margin_mm=[10, 10, 10],
manualroi=False,
texture_analysis=None,
segmentation_smoothing=False,
smoothing_mm=4,
volume_blowup=1.00,
data3d=None,
metadata=None,
seeds=None,
edit_data=False,
segparams={},
segmodelparams=default_segmodelparams,
roi=None,
output_label=1,
slab={},
output_datapath=None,
input_datapath_start='',
experiment_caption='',
lisa_operator_identifier='',
volume_unit='ml',
save_filetype='pklz',
debug_mode=False,
seg_postproc_pars={},
cache_filename='cache.yml',
seg_preproc_pars={},
after_load_processing={},
segmentation_alternative_params=None,
sftp_username='lisa_default',
sftp_password='',
input_annotation_file=None,
output_annotation_file=None,
# run=False,
run_organ_segmentation=False,
run_vessel_segmentation=False,
run_vessel_segmentation_params={},
run_list = None,
get_series_number_callback=None
# iparams=None,
):
""" Segmentation of objects from CT data.
:param datapath: path to directory with dicom files
        :param manualroi: manually set the ROI before data processing; there is a
            problem with correct coordinates
:param data3d, metadata: it can be used for data loading not from
directory. If both are setted, datapath is ignored
:param output_label: label for output segmented volume
        :param slab: additional label system for describing segmented data
{'none':0, 'liver':1, 'lesions':6}
:param roi: region of interest.
[[startx, stopx], [sty, spy], [stz, spz]]
:param seeds: ndimage array with size same as data3d
        :param experiment_caption: this caption is used for naming of outputs
:param lisa_operator_identifier: used for logging
:param input_datapath_start: Path where user directory selection dialog
starts.
        :param volume_blowup: Blow-up volume is computed during smoothing, so it
            only takes effect if smoothing is turned on.
        :param seg_postproc_pars: Can be used for setting postprocessing
            parameters, for example smoothing_mm.
        :param segmentation_alternative_params: dict of alternative params, e.g.
            {'vs5': {'voxelsize_mm': [5, 5, 5]}, 'vs3': {'voxelsize_mm': [3, 3, 3]}}
:param input_annotation_file: annotation input based on dwv json export (https://github.com/ivmartel/dwv)
        :param run_list: List of functions which should be run by the run() function. A default
            list with segmentation is used if set to None.
"""
from imcut import pycut
default_segparams = {
'method': pycut.methods[0],
'pairwise_alpha_per_mm2': 40,
'use_boundary_penalties': False,
'boundary_penalties_sigma': 50}
self.iparams = {}
self.datapath = datapath
self.set_output_datapath(output_datapath)
self.sftp_username = sftp_username
self.sftp_password = sftp_password
self.input_datapath_start = input_datapath_start
self.crinfo = [[0, None], [0, None], [0, None]]
self.slab = data_plus.default_slab()
self.slab.update(slab)
self.output_label = output_label
self.working_voxelsize_mm = None
self.input_wvx_size = working_voxelsize_mm
# print segparams
# @TODO each axis independent alpha
self.segparams = default_segparams
self.segparams.update(segparams)
self.segmodelparams = default_segmodelparams
self.segmodelparams.update(segmodelparams)
self.series_number = series_number
self.autocrop = autocrop
self.autocrop_margin_mm = np.array(autocrop_margin_mm)
self.texture_analysis = texture_analysis
self.segmentation_smoothing = segmentation_smoothing
self.smoothing_mm = smoothing_mm
self.volume_blowup = volume_blowup
self.edit_data = edit_data
self.roi = roi
self.data3d = data3d
self.seeds = seeds
self.segmentation = None
self.processing_time = None
self.experiment_caption = experiment_caption
self.lisa_operator_identifier = lisa_operator_identifier
# self.version = qmisc.getVersionString()
# if self.version is None:
self.version = "1.21.0"
self.viewermax = viewermax
self.viewermin = viewermin
self.volume_unit = volume_unit
self.organ_interactivity_counter = 0
self.dcmfilelist = None
self.save_filetype = save_filetype
self.vessel_tree = {}
self.debug_mode = debug_mode
self.gui_update = None
self.segmentation_alternative_params = config_default.default_segmentation_alternative_params
if segmentation_alternative_params is not None:
self.segmentation_alternative_params.update(segmentation_alternative_params)
self.saved_seeds = {}
# self._json_description
# SegPostprocPars = namedtuple(
# 'SegPostprocPars', [
# 'smoothing_mm',
# 'segmentation_smoothing',
# 'volume_blowup',
# 'snakes',
# 'snakes_method',
# 'snakes_params']
# )
self.cache = cachef.CacheFile(cache_filename)
self.seg_postproc_pars = {
'smoothing_mm': smoothing_mm,
'segmentation_smoothing': segmentation_smoothing,
'volume_blowup': volume_blowup,
'snakes': False,
'snakes_method': 'ACWE',
'snakes_params': {'smoothing': 1, 'lambda1': 100, 'lambda2': 1},
'snakes_niter': 20,
# 'postproc_working_voxelsize': [1.0, 1.0, 1.0],
'postproc_working_voxelsize': 'orig',
}
self.seg_postproc_pars.update(seg_postproc_pars)
self.seg_preproc_pars = {
'use_automatic_segmentation': True,
}
self.seg_preproc_pars.update(seg_preproc_pars)
self.after_load_processing = {
'run_automatic_liver_seeds': False,
}
self.after_load_processing.update(after_load_processing)
self.apriori = None
# seg_postproc_pars.update(seg_postproc_pars)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
# self.seg_postproc_pars = SegPostprocPars(**seg_postproc_pars_default)
# self.run = run
self.run_organ_segmentation = run_organ_segmentation
self.run_vessel_segmentation = run_vessel_segmentation
self.run_vessel_segmentation_params = run_vessel_segmentation_params
#
oseg_input_params = locals()
oseg_input_params = self.__clean_oseg_input_params(oseg_input_params)
logger.debug("oseg_input_params")
logger.debug(str(oseg_input_params))
self.oseg_input_params = oseg_input_params
self.input_annotaion_file = input_annotation_file
self.output_annotaion_file = output_annotation_file
from . import runner
self.runner = runner.Runner(self)
self.init_run_list(run_list)
self.get_series_number_callback = get_series_number_callback
if data3d is None or metadata is None:
# if 'datapath' in self.iparams:
# datapath = self.iparams['datapath']
if datapath is not None:
reader = datareader.DataReader()
datap = reader.Get3DData(
datapath, dataplus_format=True,
get_series_number_callback=get_series_number_callback)
# self.iparams['series_number'] = metadata['series_number']
# self.iparams['datapath'] = datapath
self.import_dataplus(datap)
else:
# self.data3d = data3d
# default values are updated in next line
mindatap = {'series_number': -1,
'voxelsize_mm': 1,
'datapath': None,
'data3d': data3d
}
mindatap.update(metadata)
self.import_dataplus(mindatap)
# self.iparams['series_number'] = self.metadata['series_number']
# self.iparams['datapath'] = self.metadata['datapath']
# self.import_dataplus()
# def importDataPlus(self, datap):
# """
# Function for input data
# """
# self.data3d = datap['data3d']
# self.crinfo = datap['crinfo']
# self.segmentation = datap['segmentation']
# self.slab = datap['slab']
# self.voxelsize_mm = datap['voxelsize_mm']
# self.orig_shape = datap['orig_shape']
# self.seeds = datap[
# 'processing_information']['organ_segmentation']['seeds']
def set_output_datapath(self, output_datapath):
if output_datapath is None:
output_datapath = '~/lisa_data'
self.output_datapath = os.path.expanduser(output_datapath)
def update(self):
from . import update_stable
update_stable.make_update()
# import subprocess
# print subprocess.call(['conda', 'update', '-y', '-c', 'mjirik', '-c', 'SimpleITK', 'lisa']) #, shell=True)
def add_to_segmentation(self, source_segmentation, target_labels, source_labels=None):
"""
        Stores the requested labels from a temporary segmentation into the slab segmentation.
        The zero label is ignored.
:param source_segmentation: ndimage
:param target_labels: list of (string or numeric) labels for output segmentation. Labels are paired
with source_labels if possible.
:param source_labels: list of numeric labels for source segmentation
:return:
"""
if source_labels is None:
source_labels = list(np.unique(source_segmentation))
# kick zero
if 0 in source_labels:
source_labels.pop(source_labels.index(0))
for labels in zip(source_labels, target_labels):
src, dst = labels
self.segmentation[source_segmentation==src] = self.nlabels(dst)
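    # Minimal usage sketch for add_to_segmentation() (labels are hypothetical;
    # `oseg` stands for an OrganSegmentation instance):
    #
    #   # tmp_seg contains labels {0, 1, 2}
    #   # oseg.add_to_segmentation(tmp_seg, target_labels=["liver", "porta"])
    #   # -> voxels with label 1 become slab["liver"], label 2 becomes
    #   #    slab["porta"], and zeros are ignored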
def update_parameters_based_on_label(self, label):
self.update_parameters(self.segmentation_alternative_params[label])
def update_parameters(self, params):
"""
:param params:
:return:
"""
if 'segparams' in params.keys():
self.segparams = params['segparams']
logger.debug('segparams updated')
if 'segmodelparams' in params.keys():
self.segmodelparams = params['segmodelparams']
logger.debug('segmodelparams updated')
if 'output_label' in params.keys():
# logger.debug("output label " + str(params["output_label"]))
# if type(params['output_label']) is str:
# key = params["output_label"]
# params["output_label"] = self.slab[key]
self.output_label = self.nlabels(params['output_label'])
logger.debug("'output_label' updated to " + str(self.nlabels(self.output_label, return_mode="str")))
if 'working_voxelsize_mm' in params.keys():
self.input_wvx_size = copy.copy(params['working_voxelsize_mm'])
self.working_voxelsize_mm = params['working_voxelsize_mm']
vx_size = self.working_voxelsize_mm
if np.isscalar(vx_size):
vx_size = ([vx_size] * 3)
vx_size = np.array(vx_size).astype(float)
self.working_voxelsize_mm = vx_size
logger.debug('working_voxelsize_mm updated')
if 'smoothing_mm' in params.keys():
self.smoothing_mm = params['smoothing_mm']
logger.debug('smoothing_mm updated')
if 'seg_postproc_pars' in params.keys():
self.seg_postproc_pars = params['seg_postproc_pars']
logger.debug('seg_postproc_pars updated')
if 'clean_seeds_after_update_parameters' in params.keys():
if self.seeds is not None:
self.seeds[...] = 0
logger.debug('clean_seeds_after_update_parameters')
def run_sss(self):
sseg = sss.SupportStructureSegmentation(
data3d=self.data3d,
voxelsize_mm=self.voxelsize_mm,
)
sseg.run()
# sseg.bone_segmentation()
# sseg.lungs_segmentation()
# sseg.heart_segmentation()
# TODO remove hack - force remove number 1 from segmentation
# this sould be fixed in sss
sseg.segmentation[sseg.segmentation == 1] = 0
self.segmentation = sseg.segmentation
self.slab = sseg.slab
def __clean_oseg_input_params(self, oseg_params):
"""
Used for storing input params of organ segmentation. Big data are not
stored due to big memory usage.
"""
oseg_params['data3d'] = None
oseg_params['segmentation'] = None
oseg_params.pop('self')
oseg_params.pop('pycut')
return oseg_params
def process_wvx_size_mm(self, metadata):
"""This function does something.
Args:
name (str): The name to use.
Kwargs:
state (bool): Current state to be in.
"""
# vx_size = self.working_voxelsize_mm
vx_size = self.input_wvx_size
if vx_size == 'orig':
vx_size = metadata['voxelsize_mm']
elif vx_size == 'orig*2':
vx_size = np.array(metadata['voxelsize_mm']) * 2
elif vx_size == 'orig*4':
vx_size = np.array(metadata['voxelsize_mm']) * 4
if np.isscalar(vx_size):
vx_size = ([vx_size] * 3)
vx_size = np.array(vx_size).astype(float)
# if np.isscalar(vx_sizey):
# vx_size = (np.ones([3]) *vx_size).astype(float)
# self.iparams['working_voxelsize_mm'] = vx_size
self.working_voxelsize_mm = vx_size
# return vx_size
def load_data(self, datapath):
self.datapath = datapath
reader = datareader.DataReader()
# seg.data3d, metadata =
datap = reader.Get3DData(self.datapath, dataplus_format=True)
# rint datap.keys()
# self.iparams['series_number'] = self.metadata['series_number']
# self.iparams['datapath'] = self.datapath
self.import_dataplus(datap)
def get_slab_value(self, label, value=None):
value = data_plus.get_slab_value(self.slab, label, value)
if self.gui_update is not None:
self.gui_update()
return value
# def sliver_compare_with_other_volume_from_path(self, filepath, label1=1, label2=2):
# evaluation = self.sliver_compare_with_other_volume(segmentation_datap, label1=label1, label2=label2)
# return evaluation
def sliver_compare_with_other_volume(self, segmentation2_datap, label1=1, label2=1):
"""
        Compares the current Lisa segmentation with another one given by
        segmentation2_datap. That means
        segmentation2_datap = {
'segmentation': 3d np.array,
'crinfo': information about crop (optional)
}
"""
# if there is no segmentation, data can be stored in data3d. It is the
# way how are data stored in sliver.
if 'segmentation' in segmentation2_datap.keys():
segm_key = 'segmentation'
else:
segm_key = 'data3d'
if 'crinfo' in segmentation2_datap.keys():
data3d_segmentation = qmisc.uncrop(
segmentation2_datap[segm_key],
segmentation2_datap['crinfo'],
self.orig_shape)
else:
data3d_segmentation = segmentation2_datap[segm_key]
pass
# now we can uncrop actual Lisa data
data3d_segmentation_actual = qmisc.uncrop(
self.segmentation,
self.crinfo,
self.orig_shape)
# label1 = 1
# label2 = segmentation_label
# from PyQt5.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# print('unique data1 ', np.unique(data3d_segmentation_actual))
# print('unique data2 ', np.unique(data3d_segmentation))
# print("set label1 and label2")
# print("then press 'c' and 'Enter'")
# import pdb; pdb.set_trace() # noqa BREAKPOINT
evaluation, diff = volumetry_evaluation.compare_volumes_sliver(
# imma.segmentation_labels.se
ima.select_labels(data3d_segmentation_actual, label1).astype(np.int8),
ima.select_labels(data3d_segmentation, label2).astype(np.int8),
# data3d_segmentation_actual == label1,
# data3d_segmentation == label2,
self.voxelsize_mm,
return_diff=True
)
# score = volumetry_evaluation.sliver_score_one_couple(evaluation)
segdiff = qmisc.crop(
(diff),
self.crinfo)
return evaluation, segdiff
def segm_smoothing(self, sigma_mm, labels="liver", background_label="none"):
"""
Shape of output segmentation is smoothed with gaussian filter.
Sigma is computed in mm
"""
segmentation_general.segmentation_smoothing(
self.segmentation,
sigma_mm,
labels=labels,
voxelsize_mm=self.voxelsize_mm,
slab=self.slab,
background_label=background_label,
volume_blowup=self.volume_blowup,
)
# import scipy.ndimage
def minimize_slab(self):
imsl.minimize_slab(self.slab, self.segmentation)
def select_label(self, labels):
"""
Return ndimage with selected segmentation
:param labels:
:return:
"""
selected_segmentation = ima.select_labels(self.segmentation, labels=labels, slab=self.slab)
return selected_segmentation
def import_segmentation_from_file(self, filepath, new_label_selection_method=None):
"""
        Loads data from a file. Uncropped data are expected.
        The old label is set to 0 and then the new label is loaded.
:param new_label_selection_method: should be compatible with lisa_window.ui_select_label()
ui_select_label(headline, text_inside="select from existing labels or write a new one",
return_i=True, return_str=True, multiple_choice=False):
"""
# logger.debug("import segmentation from file")
# logger.debug(str(self.crinfo))
# reader = datareader.DataReader()
# datap = reader.Get3DData(filepath, dataplus_format=True)
datap = io3d.read(filepath, dataplus_format=True)
segmentation = datap['data3d']
segmentation = qmisc.crop(segmentation, self.crinfo)
logger.debug(str(segmentation.shape))
#
nzlabels = set(np.unique(segmentation)) - {0}
print("nzlabels: ", nzlabels)
for label in nzlabels:
if new_label_selection_method:
headline = f"Select new label for input label {label}"
text_inside = f"Actual segmentation labels: {self.slab} \nImported labels: {nzlabels}\n"
label_out = new_label_selection_method(headline, text_inside=text_inside, return_i=True, return_str=False)
else:
label_out = label
self.segmentation_replacement(
segmentation_new=segmentation,
label_new=label,
label=label_out
)
# self.segmentation = segmentation
self.add_missing_labels()
def import_dataplus(self, dataplus):
datap = {
'dcmfilelist': None,
}
datap.update(dataplus)
dpkeys = datap.keys()
self.data3d = datap['data3d']
if self.roi is not None:
self.crop(self.roi)
self.voxelsize_mm = np.array(datap['voxelsize_mm'])
self.process_wvx_size_mm(datap)
self.autocrop_margin = self.autocrop_margin_mm / self.voxelsize_mm
if 'orig_shape' in dpkeys:
self.orig_shape = datap['orig_shape']
else:
self.orig_shape = self.data3d.shape
if 'crinfo' in dpkeys:
self.crinfo = datap['crinfo']
if 'slab' in dpkeys:
self.slab = datap['slab']
if ('segmentation' in dpkeys) and datap['segmentation'] is not None:
self.segmentation = datap['segmentation']
else:
self.segmentation = np.zeros(self.data3d.shape, dtype=np.int8)
if 'vessel_tree' in dpkeys:
self.vessel_tree = datap['vessel_tree']
if ('apriori' in dpkeys) and datap['apriori'] is not None:
self.apriori = datap['apriori']
else:
self.apriori = None
if 'saved_seeds' in dpkeys:
self.saved_seeds = datap['saved_seeds']
else:
self.saved_seeds = {}
self.dcmfilelist = datap['dcmfilelist']
if "pairwise_alpha_pep_mm2" in self.segparams:
self.segparams['pairwise_alpha'] = \
self.segparams['pairwise_alpha_per_mm2'] / \
np.mean(self.working_voxelsize_mm)
self.__import_dataplus_seeds(datap)
        # do not overwrite saved seeds
if self.after_load_processing['run_automatic_liver_seeds']:
if self.seeds is None or (self.seeds == 0).all():
self.automatic_liver_seeds()
# try read prev information about time processing
try:
time_prev = datap['processing_information']['processing_time']
self.processing_time = time_prev
self.time_start = datetime.datetime.now() - time_prev
except:
self.time_start = datetime.datetime.now()
def __import_dataplus_seeds(self, datap):
"""
:type self: seeds are changed
"""
try:
self.seeds = datap['processing_information'][
'organ_segmentation']['seeds']
except:
logger.info('seeds not found in dataplus')
        # if a dicomdir is read after something that set seeds, the seeds need to be reset
# self.seeds = None
# for each mm on boundary there will be sum of penalty equal 10
if self.seeds is None:
logger.debug("Seeds are generated")
self.seeds = np.zeros(self.data3d.shape, dtype=np.int8)
logger.debug("unique seeds labels " + str(np.unique(self.seeds)))
info_text = 'dir ' + str(self.datapath)
if "series_number" in datap.keys():
info_text += ", series_number " + str(datap['series_number'])
        info_text += ', voxelsize_mm ' + str(self.voxelsize_mm)
logger.info(info_text)
def crop(self, tmpcrinfo):
"""
Function makes crop of 3d data and seeds and stores it in crinfo.
tmpcrinfo: temporary crop information
"""
# print('sedds ', str(self.seeds.shape), ' se ',
# str(self.segmentation.shape), ' d3d ', str(self.data3d.shape))
self.data3d = qmisc.crop(self.data3d, tmpcrinfo)
# No, size of seeds should be same as data3d
if self.seeds is not None:
self.seeds = qmisc.crop(self.seeds, tmpcrinfo)
if self.segmentation is not None:
self.segmentation = qmisc.crop(self.segmentation, tmpcrinfo)
self.crinfo = qmisc.combinecrinfo(self.crinfo, tmpcrinfo)
logger.debug("crinfo " + str(self.crinfo))
# print '----sedds ', self.seeds.shape, ' se ',
# self.segmentation.shape,\
# ' d3d ', self.data3d.shape
def json_annotation_import(self, json_annotation_file=None):
"""
        :param json_annotation_file: json file exported from dwv (https://github.com/ivmartel/dwv)
:return:
"""
        # TODO to be implemented (Jiří Vyskočil)
        # load the input annotation
        # write it into self.seeds
        # Lisa can then be called as:
        # python -m lisa -iaf dwv_export.json -dd input_data.pklz -o output_data.pklz -ni
        #
        # -ni runs the segmentation automatically
        # after loading, graph cut is run and the output is stored in output_data.pklz
        # the function can be called with or without the argument, as shown below
if json_annotation_file is None:
json_annotation_file = self.input_annotaion_file
datap = {}
datap['data3d'] = self.data3d
datap['segmentation'] = self.segmentation
datap['slab'] = self.slab
datap['voxelsize_mm'] = self.voxelsize_mm
jsonfile = json.load(open(json_annotation_file))
jd.get_segdata(jsonfile, datap)
if "porta" in jd.description.keys():
th = jd.description["porta"]["threshold"]
self.run_vessel_segmentation = True
self.run_vessel_segmentation_params = dict(
threshold=th,
inner_vessel_label="porta",
organ_label="liver",
seeds=jd.get_vesselpoint_in_seeds(jsonfile, "porta", self.data3d.shape),
interactivity=False)
else:
self.run_vessel_segmentation = False
self.seeds = jd.get_seeds(datap, "liver")
self.run_organ_segmentation = True
def _interactivity_begin(self):
from imcut import pycut
logger.debug('_interactivity_begin()')
# TODO make copy and work with it
# TODO really make the copy and work with it
if self.segmentation is None:
self.segmentation = np.zeros_like(self.data3d, dtype=np.int8)
data3d_tmp = self.data3d
if self.seg_preproc_pars['use_automatic_segmentation']:
data3d_tmp = self.data3d.copy()
data3d_tmp[(self.segmentation > 0) & (self.segmentation != self.output_label)] = -1000
# print 'zoom ', self.zoom
# print 'svs_mm ', self.working_voxelsize_mm
self.zoom = self.voxelsize_mm / (1.0 * self.working_voxelsize_mm)
import warnings
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
data3d_res = scipy.ndimage.zoom(
self.data3d,
self.zoom,
mode='nearest',
order=1
).astype(np.int16)
logger.debug('pycut segparams ' + str(self.segparams) +
'\nmodelparams ' + str(self.segmodelparams)
)
# insert feature function instead of string description
from . import organ_model
self.segmodelparams = organ_model.add_fv_extern_into_modelparams(self.segmodelparams)
if "pairwise_alpha_pep_mm2" in self.segparams:
self.segparams['pairwise_alpha'] = \
self.segparams['pairwise_alpha_per_mm2'] / \
np.mean(self.working_voxelsize_mm)
if 'method' not in self.segparams.keys() or \
self.segparams['method'] in pycut.accepted_methods:
from .audiosupport import beep
igc = pycut.ImageGraphCut(
# self.data3d,
data3d_res,
segparams=self.segparams,
voxelsize=self.working_voxelsize_mm,
modelparams=self.segmodelparams,
volume_unit='ml',
interactivity_loop_finish_fcn=beep,
debug_images=False
)
# elif self.segparams['method'] == '':
else:
import liver_segmentation
igc = liver_segmentation.LiverSegmentation(
data3d_res,
segparams=self.segparams,
voxelsize=self.working_voxelsize_mm,
)
if self.apriori is not None:
apriori_res = misc.resize_to_shape(
# seeds_res = scipy.ndimage.zoom(
self.apriori,
data3d_res.shape,
)
igc.apriori = apriori_res
# igc.modelparams = self.segmodelparams
# @TODO uncomment this for kernel model
# igc.modelparams = {
# 'type': 'kernel',
# 'params': {}
# }
# if self.iparams['seeds'] is not None:
if self.seeds is not None:
logger.debug("adding seeds to interactive graph_cut")
logger.opt(lazy=True).debug("seeds unique {un}", un=lambda:np.unique(self.seeds, return_counts=True))
# logger.debug(f"seeds dtype: {self.seeds.dtype}")
logger.debug(f"resize: input size: {self.seeds.shape}, output size: {data3d_res.shape}")
seeds_res = misc.resize_to_shape(
# seeds_res = scipy.ndimage.zoom(
self.seeds,
data3d_res.shape,
# mode='nearest',
mode='constant',
order=0
)
# import skimage.transform
# segm_orig_scale = skimage.transform.resize(
# self.seeds, data3d_res.shape, order=0, preserve_range=True, mode="constant"
# )
# import ipdb;ipdb.set_trace()
# logger.debug(f"compare unique {np.unique(self.seeds) == np.unique(data3d_res)}")
logger.opt(lazy=True).debug("resized seeds unique {un}", un=lambda:np.unique(seeds_res))
# logger.debug(f"seeds res dtype: {seeds_res.dtype}")
seeds_res = seeds_res.astype(np.int8)
logger.opt(lazy=True).debug("resized seeds unique {un}", un=lambda:np.unique(seeds_res))
logger.debug(f"seeds res dtype: {seeds_res.dtype}")
igc.set_seeds(seeds_res)
logger.opt(lazy=True).debug("igc seeds unique {un}", un=lambda:np.unique(igc.seeds))
        # this is here so that newly found segmentations can be added to the old ones
        # otherwise the old data would be overwritten
if self.segmentation is not None:
self.segmentation_prev = copy.copy(self.segmentation)
else:
self.segmentation_prev = None
return igc
def sync_lisa_data(self, username, password, host="147.228.47.162", callback=printTotals):
self.sftp_username = username
self.create_lisa_data_dir_tree()
import sftpsync
import paramiko
paramiko_log = os.path.join(self.output_datapath, 'paramiko.log')
paramiko.util.log_to_file(paramiko_log)
sftp = sftpsync.Sftp(host=host, username=username, password=password)
localfrom = self._output_datapath_from_server.replace(os.sep, '/')
localto = self._output_datapath_to_server.replace(os.sep, '/')
# this makes sure that all paths ends with slash
if not localfrom.endswith('/'):
localfrom += '/'
if not localto.endswith('/'):
localto += '/'
remotefrom = "from_server/"
remoteto = "to_server/"
exclude = []
logger.info("Download started\nremote from {}\nlocal from {}".format(remotefrom, localfrom))
logger.info("from")
sftp.sync(remotefrom, localfrom, download=True, exclude=exclude, delete=False, callback=callback)
logger.info("Download finished")
logger.info("Upload started\nremote to {}\nlocal to {}".format(remoteto, localto))
sftp.sync(localto, remoteto, download=False, exclude=exclude, delete=False, callback=callback)
logger.info("Upload finished")
def __resize_to_orig(self, igc_seeds):
# @TODO remove old code in except part
self.segmentation = misc.resize_to_shape(
self.segmentation,
self.data3d.shape,
self.zoom
)
self.seeds = misc.resize_to_shape(
igc_seeds,
self.data3d.shape,
self.zoom
).astype(np.uint8)
# try:
# # rint 'pred vyjimkou'
# # aise Exception ('test without skimage')
# # rint 'za vyjimkou'
# import skimage
# import skimage.transform
# # Now we need reshape seeds and segmentation to original size
#
# segm_orig_scale = skimage.transform.resize(
# self.segmentation, self.data3d.shape, order=0,
# preserve_range=True
# )
#
# seeds = skimage.transform.resize(
# igc_seeds, self.data3d.shape, order=0,
# preserve_range=True
# )
#
# # self.segmentation = segm_orig_scale
# self.seeds = seeds
# logger.debug('resize to orig with skimage')
# except:
#
# segm_orig_scale = scipy.ndimage.zoom(
# self.segmentation,
# 1.0 / self.zoom,
# mode='nearest',
# order=0
# ).astype(np.int8)
# seeds = scipy.ndimage.zoom(
# igc_seeds,
# 1.0 / self.zoom,
# mode='nearest',
# order=0
# )
# logger.debug('resize to orig with scipy.ndimage')
#
        # # @TODO remove the hack for cropping to the same size
        # # essentially this is solved, but it could be done more elegantly in zoom
        # # unfortunately there is probably a bug there
# # rint 'd3d ', self.data3d.shape
# # rint 's orig scale shape ', segm_orig_scale.shape
# shp = [
# np.min([segm_orig_scale.shape[0], self.data3d.shape[0]]),
# np.min([segm_orig_scale.shape[1], self.data3d.shape[1]]),
# np.min([segm_orig_scale.shape[2], self.data3d.shape[2]]),
# ]
# # elf.data3d = self.data3d[0:shp[0], 0:shp[1], 0:shp[2]]
# # mport ipdb; ipdb.set_trace() # BREAKPOINT
#
# self.segmentation = np.zeros(self.data3d.shape, dtype=np.int8)
# self.segmentation[
# 0:shp[0],
# 0:shp[1],
# 0:shp[2]] = segm_orig_scale[0:shp[0], 0:shp[1], 0:shp[2]]
#
# del segm_orig_scale
#
# self.seeds[
# 0:shp[0],
# 0:shp[1],
# 0:shp[2]] = seeds[0:shp[0], 0:shp[1], 0:shp[2]]
def _interactivity_end(self, igc):
"""
This is called after processing step. All data are rescaled to original
resolution.
"""
if self.debug_mode:
print("interactivity_end unique segmentation: {}".format(np.unique(self.segmentation)))
print("segparams:", igc.segparams)
print("modelparams:", igc.modelparams)
logger.debug('_interactivity_end()')
self.__resize_to_orig(igc.seeds)
self.organ_interactivity_counter = igc.interactivity_counter
logger.debug("org inter counter " +
str(self.organ_interactivity_counter))
logger.debug('nonzero segm ' + str(np.nonzero(self.segmentation)))
# if False:
if False:
            # TODO finish portal vein (PV) postprocessing
import segmentation
outputSegmentation = segmentation.vesselSegmentation( # noqa
self.data3d,
self.segmentation,
threshold=-1,
inputSigma=0.15,
dilationIterations=10,
nObj=1,
biggestObjects=False,
seeds=(self.segmentation > 0).astype(np.int8),
useSeedsOfCompactObjects=True,
interactivity=True,
binaryClosingIterations=2,
binaryOpeningIterations=0)
self._segmentation_postprocessing()
# @TODO make faster
        # merge the old and the new segmentation
# from PyQt4.QtCore import pyqtRemoveInputHook; pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
if self.segmentation_prev is None:
            # if there is no previous segmentation, behave as before
self.segmentation[self.segmentation == 1] = self.nlabels(self.output_label)
else:
# remove old pixels for this label
self.segmentation_replacement(
segmentation_new=self.segmentation,
segmentation=self.segmentation_prev,
label=self.output_label,
label_new=1,
)
# self.segmentation_prev[self.segmentation_prev == self.output_label] = 0
# set new labels
# self.segmentation_prev[np.where(self.segmentation == 1)] = self.output_label
# clean up
self.segmentation = self.segmentation_prev
self.segmentation_prev = None
# rint 'autocrop', self.autocrop
if self.autocrop is True:
# rint
# mport pdb; pdb.set_trace()
tmpcrinfo = qmisc.crinfo_from_specific_data(
self.segmentation,
self.autocrop_margin)
self.crop(tmpcrinfo)
if self.texture_analysis not in (None, False):
import texture_analysis
            # add some code, parameters, etc.
# elf.orig_scale_segmentation =
# texture_analysis.segmentation(self.data3d,
# self.orig_scale_segmentation, params = self.texture_analysis)
self.segmentation = texture_analysis.segmentation(
self.data3d,
self.segmentation,
self.voxelsize_mm
)
# set label number
# !! pomaly!!!
#
logger.debug('self.slab')
logger.debug(str(self.slab))
self.processing_time = (
datetime.datetime.now() - self.time_start).total_seconds()
logger.debug('processing_time = ' + str(self.processing_time))
def segmentation_replacement(
self,
segmentation_new,
label,
label_new=1,
segmentation=None,
**kwargs
):
"""
Remove old segmentation with label and put in new data from segmentation_new with label_new
(and ignore other labels)
:param segmentation_new: input segmentation
:param label: output label
:param label_new: input label
:param segmentation:
:param kwargs:
:return:
"""
if segmentation is None:
segmentation = self.segmentation
segmentation_general.segmentation_replacement(
segmentation,
segmentation_new,
label_new=label_new,
label=label,
slab=self.slab,
**kwargs
)
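    # Sketch of the replacement described above (labels are illustrative):
    #
    #   # oseg.segmentation_replacement(new_seg, label="liver", label_new=1)
    #   # -> voxels currently labelled "liver" are cleared, then voxels that
    #   #    carry label 1 in new_seg are written as "liver"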
def _segmentation_postprocessing(self):
"""
:segmentation_smoothing:
"""
logger.debug(str(self.seg_postproc_pars))
if self.seg_postproc_pars['segmentation_smoothing']:
# if self.segmentation_smoothing:
self.segm_smoothing(self.seg_postproc_pars['smoothing_mm'])
if self.seg_postproc_pars['snakes']:
import morphsnakes as ms
logger.debug('Making snakes')
            if self.seg_postproc_pars['snakes_method'] == 'ACWE':
method = ms.MorphACWE
            elif self.seg_postproc_pars['snakes_method'] == 'GAC':
method = ms.MorphGAC
else:
logger.error('Unknown snake method')
return
sp = self.seg_postproc_pars['snakes_params']
if 'seeds' in sp.keys() and sp['seeds'] is True:
sp['seeds'] = self.seeds
logger.debug('snakes')
d3d = imma.image.resize_to_mm(
self.data3d,
self.voxelsize_mm,
self.seg_postproc_pars['postproc_working_voxelsize'])
segw = imma.image.resize_to_mm(
self.segmentation,
self.voxelsize_mm,
self.seg_postproc_pars['postproc_working_voxelsize'])
macwe = method(
d3d,
# self.data3d,
**self.seg_postproc_pars['snakes_params']
)
macwe.levelset = (
# self.segmentation == self.slab['liver']
segw == self.slab['liver']
).astype(np.uint8)
macwe.run(self.seg_postproc_pars['snakes_niter'])
seg = imma.image.resize_to_shape(macwe.levelset, self.data3d.shape)
# for debug visualization preprocessing use fallowing line
# self.segmentation[seg == 1] += 1
self.segmentation[seg == 1] = self.slab['liver']
logger.debug('postprocessing with snakes finished')
# def interactivity(self, min_val=800, max_val=1300):
# @TODO generovat QApplication
def interactivity(self, min_val=None, max_val=None, layout=None):
from seededitorqt.seed_editor_qt import QTSeedEditor
import_gui()
logger.debug('interactivity')
# if self.edit_data:
# self.data3d = self.data_editor(self.data3d)
igc = self._interactivity_begin()
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
logger.opt(lazy=True).debug("seeds min: {min}, max: {max}, ",
max=lambda: np.max(igc.seeds), min=lambda: np.min(igc.seeds))
if layout is None:
pyed = QTSeedEditor(igc.img,
seeds=igc.seeds,
modeFun=igc.interactivity_loop,
voxelSize=igc.voxelsize,
volume_unit='ml')
else:
from imcut import QTSeedEditorWidget
pyed = QTSeedEditorWidget(igc.img,
seeds=igc.seeds,
modeFun=igc.interactivity_loop,
voxelSize=igc.voxelsize,
volume_unit='ml')
layout.addWidget(pyed)
# set window
if min_val is None:
min_val = np.min(self.data3d)
if max_val is None:
max_val = np.max(self.data3d)
window_c = ((max_val + min_val) / 2)
window_w = (max_val - min_val)
pyed.changeC(window_c)
pyed.changeW(window_w)
# from PyQt5 import QtCore
# QtCore.pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
pyed.exec_()
# import ipdb; ipdb.set_trace()
# import ipdb; ipdb.set_trace()
        # @TODO somewhere in igc.interactivity() zeros and ones get swapped;
        # this is worked around here with a hack
if igc.segmentation is not None:
self.segmentation = (igc.segmentation == 0).astype(np.int8)
self._interactivity_end(igc)
    def ninteractivity(self):
        """Function for automatic (noninteractive) mode."""
        from imcut import pycut
# mport pdb; pdb.set_trace()
igc = self._interactivity_begin()
# gc.interactivity()
# igc.make_gc()
igc.run()
if ('method' not in self.segparams.keys()) or (self.segparams['method'] in pycut.methods):
logger.debug('ninteractivity seg method GC')
self.segmentation = (igc.segmentation == 0).astype(np.int8)
else:
logger.debug('ninteractivity seg method other')
self.segmentation = np.asarray(igc.segmentation, dtype=np.int8)
self._interactivity_end(igc)
def export(self):
slab = {}
slab['none'] = 0
slab['liver'] = 1
slab['lesions'] = 6
slab.update(self.slab)
data = {}
data['version'] = (1, 0, 1)
data['data3d'] = self.data3d
data['crinfo'] = self.crinfo
data['segmentation'] = self.segmentation
data['apriori'] = self.apriori
data['slab'] = slab
data['voxelsize_mm'] = self.voxelsize_mm
data['orig_shape'] = self.orig_shape
data['vessel_tree'] = self.vessel_tree
data["saved_seeds"] = self.saved_seeds
processing_information = {
'organ_segmentation': {
'processing_time': self.processing_time,
'time_start': str(self.time_start),
'oseg_input_params': self.oseg_input_params,
'organ_interactivity_counter':
self.organ_interactivity_counter,
'seeds': self.seeds # qmisc.SparseMatrix(self.seeds)
}
}
data['processing_information'] = processing_information
# from PyQt4 import QtCore
# QtCore.pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
# TODO add dcmfilelist
logger.debug("export()")
# ogger.debug(str(data))
logger.debug("org int ctr " + str(self.organ_interactivity_counter))
# ata["metadata"] = self.metadata
# mport pdb; pdb.set_trace()
return data
# def get_iparams(self):
# self.iparams['seeds'] = qmisc.SparseMatrix(self.iparams['seeds'])
# return self.iparams
def automatic_liver_seeds(self):
seeds, likdif = organ_seeds.automatic_liver_seeds(self.data3d, self.seeds, self.voxelsize_mm)
        # rescale to values between zero and one; the second constant was set empirically
self.apriori = boltzman(likdif, 0, 200).astype(np.float16)
def add_seeds_mm(self, z_mm, x_mm, y_mm, label, radius, width=1):
"""
        Function adds circle seeds to one slice with a defined radius.
        It is possible to set more seeds on one slice by passing arrays.
        x_mm, y_mm: coordinates of the circle in mm. May be arrays.
        z_mm: slice coordinates in mm. May be an array.
        :param label: one number. 1 is object seed, 2 is background seed
        :param radius: radius of the circle in mm
        :param width: makes a circle with the defined width (the circle is repeated every millimeter)
"""
data_manipulation.add_seeds_mm(
self.seeds, self.voxelsize_mm,
z_mm, x_mm, y_mm,
label,
radius, width
)
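    # Usage sketch (coordinates in mm are illustrative only):
    #
    #   # place an object seed (label 1) with a 5 mm radius at z=100 mm,
    #   # x=120 mm, y=60 mm:
    #   # oseg.add_seeds_mm([100], [120], [60], label=1, radius=5)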
def lesionsLocalization(self):
""" Localization of lession """
from . import lesions
tumory = lesions.Lesions()
# tumory.overlay_test()
data = self.export()
tumory.import_data(data)
tumory.run_gui()
# tumory.automatic_localization()
self.segmentation = tumory.segmentation
def nlabels(self, label, label_meta=None, return_mode="num"):
"""
        Add one or more labels if necessary and return their numeric values.
If "new" keyword is used and no other information is provided, the max + 1 label is created.
If "new" keyword is used and additional numeric info is provided, the number is used also as a key.
:param label: string, number or "new"
:param label_meta: string, number or "new
:param return_mode: "num" or "str" or "both".
:return:
"""
return ima.get_nlabels(self.slab, label, label_meta, return_mode=return_mode)
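    # Example of the label resolution described in the docstring
    # (hypothetical slab {"none": 0, "liver": 1}):
    #
    #   # oseg.nlabels("liver")                -> 1
    #   # oseg.nlabels(1, return_mode="str")   -> "liver"
    #   # oseg.nlabels("new")                  -> a fresh numeric label (max + 1)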
def add_missing_labels(self):
ima.add_missing_labels(self.segmentation, self.slab)
def segmentation_relabel(self, from_label, to_label):
"""
Relabel segmentation
:param from_label: int or string
        :param to_label: int or string
:return:
"""
from_label = self.nlabels(from_label)
to_label = self.nlabels(to_label)
select = self.select_label(from_label)
self.segmentation[select] = to_label
def portalVeinSegmentation(self, inner_vessel_label="porta", organ_label="liver", outer_vessel_label=None,
forbidden_label=None, threshold=None, interactivity=True, seeds=None, **inparams):
"""
        Segmentation of the vein in a specified volume given by the label "liver"
        (usually number 1). If no volume is specified, the whole image is used.
        If this function is used repeatedly (if there already is some segmentation
        in this image), all voxels labeled as 'porta' are removed and set back
        to 'liver' before processing.
You can use additional parameters from vesselSegmentation()
For example interactivity=False, biggestObjects=True, ...
:param forbidden_label: int or list of ints. Labels not included into segmentable area.
"""
from imtools import segmentation as imsegmentation
logger.info('segmentation max label ' + str(np.max(self.segmentation)))
if outer_vessel_label is None:
outer_vessel_label = inner_vessel_label
# if there is no organ segmentation, use all image
# self.add_slab_label_carefully(numeric_label=numeric_label, string_label=string_label)
# if there is no liver segmentation, use whole image
# if np.max(self.segmentation) == 0:
# self.segmentation = self.segmentation + 1
# remove prev segmentation
        # TODO split into the inner and outer part of the portal vein
params = {
'threshold': threshold,
'inputSigma': 0.15,
'aoi_dilation_iterations': 10,
'nObj': 1,
'biggestObjects': False,
'useSeedsOfCompactObjects': True,
'interactivity': interactivity,
'binaryClosingIterations': 2,
'binaryOpeningIterations': 0,
'seeds': seeds,
}
params.update(inparams)
# logger.debug("ogran_label ", organ_label)
# target_segmentation = (self.segmentation == self.nlabels(organ_label)).astype(np.int8)
target_segmentation = ima.select_labels(
self.segmentation, organ_label, self.slab
)
outputSegmentation = imsegmentation.vesselSegmentation(
self.data3d,
voxelsize_mm=self.voxelsize_mm,
# target_segmentation,
segmentation=self.segmentation,
# organ_label=organ_label,
aoi_label=organ_label,
forbidden_label=forbidden_label,
slab=self.slab,
debug=self.debug_mode,
**params
)
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
self.segmentation[(outputSegmentation == 1) & (target_segmentation == 1)] = self.nlabels(inner_vessel_label)
self.segmentation[(outputSegmentation == 1) & (target_segmentation == 0)] = self.nlabels(outer_vessel_label)
# self.__vesselTree(outputSegmentation, 'porta')
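    # Usage sketch (parameter values are illustrative; extra keyword arguments
    # are forwarded to imtools' vesselSegmentation(), as noted in the docstring):
    #
    #   # oseg.portalVeinSegmentation(interactivity=False, threshold=180,
    #   #                             inner_vessel_label="porta",
    #   #                             organ_label="liver")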
def saveVesselTree(self, textLabel, fn_yaml=None, fn_vtk=None):
"""
textLabel: 'porta' or 'hepatic_veins'
"""
self.__vesselTree(
self.select_label(textLabel),
# self.segmentation == self.slab[textLabel],
textLabel,
fn_yaml=fn_yaml,
fn_vtk=fn_vtk,
)
def export_seeds_to_files(self, fn_seeds):
"""
        Export the current seeds and all saved seeds into files based on the given file name. Data are stored as image data (data3d).
:param fn_seeds:
:return:
"""
datap = self.export()
if "saved_seeds" in datap:
saved_seeds = datap.pop("saved_seeds")
for key in saved_seeds:
datap = self.export()
if "saved_seeds" in datap:
datap.pop("saved_seeds")
if "seeds" in datap:
datap.pop("seeds")
if "segmentation" in datap:
datap.pop("segmentation")
if "processing_information" in datap:
datap.pop('processing_information')
seeds = saved_seeds[key]
datap["data3d"] = seeds
basefn, ext = op.splitext(fn_seeds)
fn_seeds_key = basefn + "_" + key + ext
io3d.write(datap, fn_seeds_key)
if "seeds" in datap:
seeds = datap.pop("seeds")
if "segmentation" in datap:
datap.pop("segmentation")
if "processing_information" in datap:
datap.pop('processing_information')
datap["data3d"] = seeds
io3d.write(datap, fn_seeds)
def export_segmentations_to_files(self, fn_segmentation):
# datap = self.export()
if self.segmentation is not None:
for lab in np.unique(self.segmentation):
if lab == 0:
continue
strlabel = ima.get_nlabel(slab=self.slab, label=lab, return_mode="str")
datap = {
"data3d": (self.segmentation == lab).np.astype(np.uint8) * 255,
"voxelsize_mm": self.voxelsize_mm
}
basefn, ext = op.splitext(fn_segmentation)
fn_seeds_key = basefn + "_" + strlabel + ext
io3d.write(datap, fn_seeds_key)
def export_segmentation_to_file(self, fn_segmentation):
# datap = self.export()
logger.debug(f"export segmentation to file: {fn_segmentation}")
if self.segmentation is not None:
# basefn, ext = op.splitext(fn_segmentation)
# strlabel = ima.get_nlabel(slab=self.slab, label=lab, return_mode="str")
datap = {
"data3d": self.segmentation,
"voxelsize_mm": self.voxelsize_mm
}
io3d.write(datap, fn_segmentation)
def import_seeds_from_file(self, fn_seeds):
datap = io3d.read(fn_seeds, dataplus_format=True)
if "seeds" in datap and datap["seeds"] is not None:
self.seeds = datap["seeds"]
else:
self.seeds = datap["data3d"]
def __vesselTree(self, binaryData3d, textLabel, fn_yaml=None, fn_vtk=None):
import skelet3d
from skelet3d import skeleton_analyser # histology_analyser as skan
data3d_thr = (binaryData3d > 0).astype(np.int8)
data3d_skel = skelet3d.skelet3d(data3d_thr)
skan = skeleton_analyser.SkeletonAnalyser(
data3d_skel,
volume_data=data3d_thr,
voxelsize_mm=self.voxelsize_mm,
cut_wrong_skeleton=False,
aggregate_near_nodes_distance=0
)
stats = skan.skeleton_analysis(guiUpdateFunction=None)
if 'Graph' not in self.vessel_tree.keys():
self.vessel_tree['voxelsize_mm'] = self.voxelsize_mm
self.vessel_tree['Graph'] = {}
self.vessel_tree['Graph'][textLabel] = stats
# print sa.stats
logger.debug('save vessel tree to file')
if fn_yaml is None:
fn_yaml = self.get_standard_ouptut_filename(filetype='yaml', suffix='-vt-' + textLabel)
# save all skeletons to one special file
misc.obj_to_file(self.vessel_tree, fn_yaml, filetype='yaml')
logger.debug('save vessel tree to file - finished')
# generate vtk file
logger.debug('start to generate vtk file from vessel_tree')
import fibrous.tb_vtk
# import imtools.gen_vtk_tree
if fn_vtk is None:
fn_vtk = self.get_standard_ouptut_filename(filetype='vtk', suffix='-vt-' + textLabel)
# imtools.gen_vtk_tree.vt2vtk_file(self.vessel_tree, fn_vtk, text_label=textLabel)
fibrous.tb_vtk.vt2vtk_file(self.vessel_tree, fn_vtk, text_label=textLabel)
logger.debug('generating vtk file from vessel_tree finished')
def hepaticVeinsSegmentation(self):
from imtools import segmentation
outputSegmentation = segmentation.vesselSegmentation(
self.data3d,
self.segmentation,
threshold=None,
inputSigma=0.15,
dilationIterations=10,
nObj=1,
biggestObjects=False,
useSeedsOfCompactObjects=True,
interactivity=True,
binaryClosingIterations=2,
binaryOpeningIterations=0)
slab = {'hepatic_veins': 3}
slab.update(self.slab)
        # from PyQt4.QtCore import pyqtRemoveInputHook
        # pyqtRemoveInputHook()
        # import ipdb; ipdb.set_trace()  # BREAKPOINT
self.slab = slab
self.segmentation[outputSegmentation == 1] = slab['hepatic_veins']
# skeletonizace
# self.__vesselTree(outputSegmentation, 'hepatic_veins')
def get_segmented_volume_size_mm3(self, labels="liver"):
"""Compute segmented volume in mm3, based on subsampeled data."""
voxelvolume_mm3 = np.prod(self.voxelsize_mm)
volume_mm3 = np.sum(ima.select_labels(self.segmentation, labels, self.slab)) * voxelvolume_mm3
return volume_mm3
def get_standard_ouptut_filename(self, filetype=None, suffix=''):
"""
        Build a standard output filename from the data path, experiment caption, suffix and filetype.
"""
logger.debug(f"filetype: {filetype}, suffix: {suffix}")
if filetype is None:
filetype = self.save_filetype
output_dir = self.output_datapath
if self.datapath is not None:
pth, filename = op.split(op.normpath(self.datapath))
filename, ext = os.path.splitext(filename)
else:
filename = ''
if len(filename) > 0 and len(self.experiment_caption) > 0:
filename += "-"
filename += self.experiment_caption
# if savestring in ['a', 'A']:
# save renamed file too
filename = '' + filename + suffix + '.' + filetype
filepath = op.join(output_dir, filename)
filepath = misc.suggest_filename(filepath)
return filepath
def save_outputs(self, filepath=None):
""" Save input data, segmentation and all other metadata to file.
:param filepath:
:return:
"""
logger.debug(f"filepath: {filepath}")
data = self.export()
data['version'] = self.version # qmisc.getVersionString()
data['experiment_caption'] = self.experiment_caption
data['lisa_operator_identifier'] = self.lisa_operator_identifier
self.create_lisa_data_dir_tree()
if filepath is None:
filepath = self.get_standard_ouptut_filename()
# import ipdb; ipdb.set_trace()
import io3d
logger.debug("save outputs to file %s" % (filepath))
io3d.write(data, filepath)
if self.output_annotaion_file is not None:
self.json_annotation_export()
def json_annotation_export(self):
"""
:return:
"""
# TODO Jiri Vyskocil
output_file = self.output_annotaion_file
# self.segmentation
data = {}
data['segmentation'] = self.segmentation
data['slab'] = self.slab
jd.write_to_json(data, output_name=output_file)
def create_lisa_data_dir_tree(self):
lisa_data.create_lisa_data_dir_tree(self)
def save_seeds(self, name):
"""
        Store actual seeds under a name for later use.
:param name:
:return:
"""
seeds = copy.copy(self.seeds)
# self.saved_seeds[name] = scipy.sparse.csr_matrix(seeds)
self.saved_seeds[name] = seeds
def load_seeds(self, name):
"""
        Load seeds previously stored under a name.
:param name:
:return:
"""
seeds = self.saved_seeds[name]
# if scipy.sparse.issparse(seeds):
# seeds = seeds.todense()
self.seeds = seeds
def get_list_of_saved_seeds(self):
return list(self.saved_seeds.keys())
def split_tissue_recusively_with_labeled_volumetric_vessel_tree(
self, organ_label, seeds,
organ_split_label_format_pattern="{label}{i}"
):
"""
        :param organ_split_label_format_pattern: Specify the pattern for naming the split parts of the tissue
:param organ_label: label of organ to split
:param seeds: ndarray, 1 is trunk, 2 is first level branches, 3 is second level branches ...
:return:
"""
un_labels_dict = imma.labeled.unique_labels_by_seeds(self.segmentation, seeds)
        # still missing: which branch is a descendant of which one
# (level, tissue_to_split, trunk, branches
split_parameters = {1: []}
to_process = [(1, organ_label, un_labels_dict[1][0], un_labels_dict[2])]
while len(to_process) > 0:
actual = to_process.pop(0)
actual_level = actual[0]
actual_organ_label = actual[1]
actual_trunk_label = actual[2]
actual_branch_labels = actual[3]
split_labels_ij, connected_ij = self.split_tissue_with_labeled_volumetric_vessel_tree(
organ_label=actual_organ_label,
trunk_label=actual_trunk_label,
branch_labels=actual_branch_labels,
organ_split_label_format_pattern=organ_split_label_format_pattern
)
            # prepare the next branch
# level of next trunk
next_level = actual_level + 1
next_level_of_branches = next_level + 1
if next_level_of_branches <= len(un_labels_dict):
for i in range(len(split_labels_ij)):
import imma.dili as imdl
next_trunk = actual_branch_labels[i]
ind = imdl.find_in_list_of_lists(connected_ij, next_trunk)
if ind is None:
logger.error("There is strange error. This should be impossible.")
next_organ_label = split_labels_ij[ind]
next_branches = list(set(connected_ij[ind]).intersection(set(un_labels_dict[next_level_of_branches])))
if len(next_branches) > 1:
next = (next_level, next_organ_label, next_trunk, next_branches)
to_process.append(next)
return None, None
def split_tissue_with_labeled_volumetric_vessel_tree(
self, organ_label, trunk_label, branch_labels, split_labels=None,
organ_split_label_format_pattern="{label}{i}", on_missed_branch="split"):
"""
:param organ_label:
:param trunk_label:
:param branch_label1:
:param branch_label2:
:param seeds:
:param split_label1:
:param split_label2:
:return:
"""
# try:
# if trunk_label is None:
# trunk_label = self.segmentation[seeds == 1][0]
# if branch_labels is None:
# branch
# branch_label1 = self.segmentation[seeds == 2][0]
# if branch_label2 is None:
# branch_label2 = self.segmentation[seeds == 3][0]
# except IndexError:
# ValueError("Trunk and branches labels should be defined or seeds with values 1,2,3 are expected.")
trunk_label = self.nlabels(trunk_label)
branch_labels = self.nlabels(branch_labels)
split, connected = virtual_resection.split_tissue_on_labeled_tree(
self.segmentation,
trunk_label,
branch_labels=branch_labels,
tissue_segmentation=self.select_label(organ_label),
ignore_labels=[self.nlabels(organ_label)],
on_missed_branch=on_missed_branch
)
if split_labels is None:
split_labels = [None] * len(branch_labels)
for i in range(len(branch_labels)):
split_labels[i] = organ_split_label_format_pattern.format(
label=self.nlabels(organ_label, return_mode="str"),
i=(i + 1)
)
# if split_label1 is None:
# split_label1 = self.nlabels(organ_label, return_mode="str") + "1"
# # split_label1 = self.nlabels(split_label1)
# if split_label2 is None:
# split_label2 = self.nlabels(organ_label, return_mode="str") + "2"
# # split_label2 = self.nlabels(split_label2)
for i in range(len(split_labels)):
self.segmentation[split == (i + 1)] = self.nlabels(split_labels[i])
# self.segmentation[split == 1] = self.nlabels(split_label1)
# self.segmentation[split == 2] = self.nlabels(split_label2)
return split_labels, connected
# old version
# def rotate(self, angle, axes):
# self.data3d = scipy.ndimage.interpolation.rotate(self.data3d, angle, axes)
# self.segmentation = scipy.ndimage.interpolation.rotate(self.segmentation, angle, axes)
# self.seeds = scipy.ndimage.interpolation.rotate(self.seeds, angle, axes)
def resize_to_mm(self, voxelsize_mm):
"""
Resize voxelsize to defined milimeters.
:param voxelsize_mm:
:return:
"""
if np.isscalar(voxelsize_mm):
voxelsize_mm = self.data3d.ndim * [voxelsize_mm]
orig_voxelsize_mm = self.voxelsize_mm
orig_shape = self.data3d.shape
self.data3d = imma.image.resize_to_mm(self.data3d, voxelsize_mm=orig_voxelsize_mm, new_voxelsize_mm=voxelsize_mm)
if self.segmentation is not None:
dtype = self.segmentation.dtype
self.segmentation = imma.image.resize_to_mm(
self.segmentation, voxelsize_mm=orig_voxelsize_mm, new_voxelsize_mm=voxelsize_mm).astype(dtype)
if not hasattr(self, "orig_voxelsize_mm"):
            # If this is the first resize
self.orig_voxelsize_mm = orig_voxelsize_mm
self.orig_shape = orig_shape
self.voxelsize_mm = voxelsize_mm
def rotate(self, phi_deg, theta_deg=None, phi_axes=(1, 2), theta_axes=(0, 1), **kwargs):
self.data3d = ima.rotate(self.data3d, phi_deg, theta_deg)
self.segmentation = ima.rotate(self.segmentation, phi_deg, theta_deg)
self.seeds = ima.rotate(self.seeds, phi_deg, theta_deg)
def random_rotate(self):
"""
Rotate data3d, segmentation and seeds with random rotation
:return:
"""
# TODO independent on voxlelsize (2016-techtest-rotate3d.ipynb)
phi_deg, theta_deg = ima.random_rotate_paramteres()
self.rotate(phi_deg, theta_deg)
# old version
# xi1 = np.random.rand()
# xi2 = np.random.rand()
#
# # theta = np.arccos(np.sqrt(1.0-xi1))
# theta = np.arccos(1.0 - (xi1 * 1))
# phi = xi2 * 2 * np.pi
#
# # xs = np.sin(theta) * np.cos(phi)
# # ys = np.sin(theta) * np.sin(phi)
# # zs = np.cos(theta)
#
# phi_deg = np.degrees(phi)
# self.rotate(phi_deg, (1, 2))
# theta_deg = np.degrees(theta)
# self.rotate(theta_deg, (0, 1))
def mirror_z_axis(self):
"""
        Mirror data3d, segmentation and seeds along the Z-axis.
:return:
"""
self.data3d = self.data3d[-1::-1]
if self.segmentation is not None:
self.segmentation = self.segmentation[-1::-1]
if self.seeds is not None:
self.seeds = self.seeds[-1::-1]
def save_input_dcm(self, filename):
# TODO add
logger.debug('save dcm')
dw = datawriter.DataWriter()
dw.Write3DData(self.data3d, filename, filetype='dcm',
metadata={'voxelsize_mm': self.voxelsize_mm})
def save_outputs_dcm(self, filename):
# TODO add
logger.debug('save dcm')
dw = datawriter.DataWriter()
dw.Write3DData(self.segmentation.astype(np.int16), filename,
filetype='dcm',
metadata={'voxelsize_mm': self.voxelsize_mm})
def save_outputs_dcm_overlay(self):
# TODO add
logger.debug('save dcm')
from PyQt5.QtCore import pyqtRemoveInputHook
pyqtRemoveInputHook()
        # import ipdb; ipdb.set_trace()  # BREAKPOINT
odp = self.output_datapath
pth, filename = op.split(op.normpath(self.datapath))
filename += "-" + self.experiment_caption
        # if savestring in ['ad']:
# save to DICOM
filepath = 'dicom-' + filename
filepath = os.path.join(odp, filepath)
filepath = misc.suggest_filename(filepath)
output_dicom_dir = filepath
data = self.export()
        # import ipdb; ipdb.set_trace()  # BREAKPOINT
overlays = {
3:
(data['segmentation'] == self.output_label).astype(np.int8)
}
if self.dcmfilelist is not None:
datawriter.saveOverlayToDicomCopy(
self.dcmfilelist,
output_dicom_dir, overlays,
data['crinfo'], data['orig_shape'])
return output_dicom_dir
def load_segmentation_from_dicom_overlay(self, dirpath=None):
"""
Get overlay from dicom file stack
:param dirpath:
:return:
"""
if dirpath is None:
dirpath = self.datapath
reader = datareader.DataReader()
data3d, metadata = reader.Get3DData(dirpath, qt_app=None, dataplus_format=False)
overlays = reader.get_overlay()
overlay = np.zeros(data3d.shape, dtype=np.int8)
# print("overlays ", list(overlays.keys()))
for key in overlays:
overlay += overlays[key]
if not np.allclose(self.data3d.shape, overlay.shape):
logger.warning("Overlay shape does not fit the data shape")
self.segmentation = overlay
return dirpath
def fill_holes_in_segmentation(self, label=None, background_label=0):
"""
Fill holes in segmentation.
        The label can be set interactively.
        :param label: if None, self.output_label is used
:return:
"""
if label is None:
label = self.output_label
segm_to_fill = self.segmentation == self.nlabels(label)
# self.segmentation[segm_to_fill] = background_label
segm_to_fill = scipy.ndimage.morphology.binary_fill_holes(segm_to_fill)
self.segmentation[segm_to_fill] = self.nlabels(label)
# segm = imtools.show_segmentation.select_labels(segmentation=self.segmentation, labels=labels)
# self.
def get_body_navigation_structures(self):
import bodynavigation
self.bodynavigation = bodynavigation.BodyNavigation(self.data3d, self.voxelsize_mm)
bn = self.bodynavigation
bn.use_new_get_lungs_setup = True
self.segmentation[bn.get_lungs() > 0] = self.nlabels("lungs")
self.segmentation[bn.get_spine() > 0] = self.nlabels("spine")
# self.segmentation[bn.get_chest() > 0] = self.nlabels("chest")
def get_body_navigation_structures_precise(self):
import bodynavigation.organ_detection
self.bodynavigation = bodynavigation.organ_detection.OrganDetection(self.data3d, self.voxelsize_mm)
bn = self.bodynavigation
self.segmentation[bn.getLungs() > 0] = self.nlabels("lungs")
self.segmentation[bn.getBones() > 0] = self.nlabels("bones")
def init_run_list(self, run_list):
if run_list is not None:
self.runner.extend(run_list)
else:
# default run
if self.input_annotaion_file is not None:
self.runner.append(self.json_annotation_import)
if self.run_organ_segmentation:
self.runner.append(self.ninteractivity)
if self.run_vessel_segmentation:
self.runner.append(self.portalVeinSegmentation, **self.run_vessel_segmentation_params)
self.runner.append(self.save_outputs)
pass
def make_run(self):
""" Non-interactive mode
:return:
"""
import time
t0 = time.time()
t1 = time.time()
self.runner.run()
# if self.input_annotaion_file is not None:
# self.json_annotation_import()
# tt = time.time()
# logger.debug("make run input af {}, {}".format(tt - t0 , tt - t1))
# t1 = tt
# if self.run_organ_segmentation:
# self.ninteractivity()
# self.slab["liver"] = 7
# self.segmentation = (self.segmentation == 1).astype('int8') * self.slab["liver"]
# tt = time.time()
# logger.debug("makerun organ seg {}, {}".format(tt - t0, tt - t1))
# t1 = tt
# self.slab["porta"] = 1
# if self.run_vessel_segmentation:
# data = {}
# data['segmentation'] = self.segmentation
# data['slab'] = self.slab
# self.portalVeinSegmentation(**self.run_vessel_segmentation_params)
# tt = time.time()
# logger.debug("makerun pv seg{}, {}".format(tt - t0, tt - t1))
# t1 = tt
#
# self.save_outputs()
tt = time.time()
logger.debug("make run end time {}, {}".format(tt - t0, tt - t1))
def split_vessel(self, input_label=None, output_label1=1, output_label2=2, **kwargs):
"""
Split vessel based on user interactivity.
More documentation in virtual_resection.split_vessel()
:param input_label:
:param output_label1:
:param output_label2:
:param kwargs: read function virtual_resection.split_vessel() for more information.
:return:
"""
if input_label is None:
from PyQt5.QtCore import pyqtRemoveInputHook
pyqtRemoveInputHook()
            # import ipdb; ipdb.set_trace()  # BREAKPOINT
print("label of vessel to split")
print("--------------------")
print("for example >> input_label = 2 ")
input_label = "porta"
import ipdb
ipdb.set_trace()
from . import virtual_resection
datap = self.export()
seeds = virtual_resection.cut_editor_old(datap, label=input_label)
lab, cut_by_user = virtual_resection.split_vessel(datap=datap, seeds=seeds, input_label=input_label, **kwargs)
self.segmentation[lab == 1] = output_label1
self.segmentation[lab == 2] = output_label2
def new_label_from_compact_segmentation(self, seeds):
"""
:param seeds:
:return:
"""
def split_organ_by_two_vessels(
self, output_label1=1, output_label2=5,
organ_label=1,
seed_label1=1,
seed_label2=2,
**kwargs):
"""
:param output_label1:
:param output_label2:
:param organ_label:
:param seed_label1:
:param seed_label2:
:param kwargs:
:return:
:py:segmentation:
"""
from . import virtual_resection
datap = self.export()
segm, dist1, dist2 = virtual_resection.split_organ_by_two_vessels(
datap, seeds=self.segmentation,
organ_label=organ_label,
seed_label1=seed_label1,
seed_label2=seed_label2,
**kwargs)
# import sed3
# ed = sed3.sed3(segm)
# ed.show()
self.segmentation[segm == 1] = self.nlabels(output_label1)
self.segmentation[segm == 2] = self.nlabels(output_label2)
def label_volumetric_vessel_tree(self, vessel_label=None, write_to_oseg=True, new_label_str_format="{}{:03d}"):
"""
Select one vessel tree, label it by branches and put it in segmentation and slab.
        :param vessel_label: int or string label of the vessel. If None, everything above zero is used.
:param write_to_oseg: Store output into oseg.segmentation if True. The slab is also updated.
:param new_label_str_format: format of new slab
:return:
"""
from . import virtual_resection
return virtual_resection.label_volumetric_vessel_tree(
self,
vessel_label=vessel_label,
write_to_oseg=write_to_oseg,
new_label_str_format=new_label_str_format
)
# def logger_init(): # pragma: no cover
# # from loguru import logger
# # logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
#
# ch = logging.StreamHandler()
# ch.setLevel(logging.ERROR)
# formatter = logging.Formatter(
# '%(name)s - %(levelname)s - %(message)s'
# )
# ch.setFormatter(formatter)
# logger.addHandler(ch)
#
# # fformatter = logging.Formatter(
# # '%(asctime)s - %(name)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s'
# # )
# fformatter = logging.Formatter('%(asctime)s %(levelname)-8s %(name)-18s %(lineno)-5d %(funcName)-12s %(message)s')
# logfile = "lisa.log"
# if op.exists(op.expanduser("~/lisa_data/")):
# logfile = op.expanduser("~/lisa_data/lisa.log")
# # problems on windows
# # fh = logging.handlers.RotatingFileHandler(logfile, maxBytes=100000, backupCount=9)
# fh = logging.FileHandler(logfile)
# fh.setFormatter(fformatter)
# fh.setLevel(logging.DEBUG)
# logger.addHandler(fh)
#
# logger.debug('logger started')
#
# return ch, fh
def logger_init():
logger.add(sys.stderr,
format="{time} {level} {message}",
# filter="my_module",
level="INFO"
)
if op.exists(op.expanduser("~/lisa_data/")):
logfile = op.expanduser("~/lisa_data/lisa.log")
logger.add(
logfile,
format="{time} {level} {message}",
level="INFO",
# rotation="10MB",
rotation="1 week"
) # Once the file is too old, it's rotated
def lisa_config_init():
"""
    Generate default config from function parameters.
Specific config given by command line argument is implemented in
parser_init() function.
"""
# read confguraton from file, use default values from OrganSegmentation
cfg = config.get_default_function_config(OrganSegmentation.__init__)
# for parameters without support in OrganSegmentation or to overpower
# default OrganSegmentation values use cfgplus
cfgplus = {
'datapath': None,
'viewermax': 225,
'viewermin': -125,
'output_datapath': os.path.expanduser("~/lisa_data"),
'input_datapath_start': os.path.expanduser("~/lisa_data")
# config_version':[1,1]
}
cfg.update(cfgplus)
# now is in cfg default values
# cfg = config.get_config("organ_segmentation.config", cfg)
cfg.update(config_default.CONFIG_DEFAULT)
user_config_path = os.path.join(cfg['output_datapath'],
"organ_segmentation.config")
config.check_config_version_and_remove_old_records(
user_config_path, version=config_version,
records_to_save=['experiment_caption', 'lisa_operator_identifier'])
# read user defined config in user data
cfg = config.get_config(user_config_path, cfg)
return cfg
def parser_init(cfg): # pragma: no cover
# input parser
conf_parser = argparse.ArgumentParser(
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument(
'-cf', '--configfile', default=None,
help="Use another config. It is loaded after default \
config and user config.")
    # Read an alternative config file. The default config is loaded first,
    # then the user config in the lisa_data directory, and finally the config
    # given by the --configfile parameter.
knownargs, unknownargs = conf_parser.parse_known_args()
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
if knownargs.configfile is not None:
cfg = config.get_config(knownargs.configfile, cfg)
parser.add_argument('-dd', '--datapath',
default=cfg["datapath"],
help='path to data dir')
parser.add_argument('-d', '--debug', action='store_true',
help='run in debug mode')
parser.add_argument(
'-vs', '--working_voxelsize_mm',
default=cfg["working_voxelsize_mm"],
type=eval, # type=str,
help='Insert working voxelsize. It can be number or \
array of three numbers. It is possible use original \n \
resolution or half of original resolution. \n \
-vs 3 \n \
-vs [3,3,5] \n \
-vs orig \n \
-vs orig*2 \n \
-vs orig*4 \n \
'
)
parser.add_argument('-mroi', '--manualroi', action='store_true',
help='manual crop before data processing',
default=cfg["manualroi"])
parser.add_argument('-op', '--output_datapath',
default=cfg["output_datapath"],
help='path for output data')
parser.add_argument('-ol', '--output_label', default=1,
help='label for segmented data')
parser.add_argument(
'--slab',
default=cfg["slab"],
type=eval,
help='labels for segmentation,\
example -slab "{\'liver\':1, \'lesions\':6}"')
parser.add_argument(
'-acr', '--autocrop',
help='automatic crop after data processing',
default=cfg["autocrop"])
parser.add_argument(
'-iparams', '--iparams',
default=None,
help='filename of ipars file with stored interactivity')
parser.add_argument(
'-sp', '--segparams',
default=cfg["segparams"],
help='params for segmentation,\
example -sp "{\'pairwise_alpha_per_mm2\':90}"')
parser.add_argument(
'-tx', '--texture_analysis', action='store_true',
help='run with texture analysis')
parser.add_argument('-ed', '--edit_data', action='store_true',
help='Run data editor')
parser.add_argument(
'-vmax', '--viewermax', type=eval, # type=int,
help='Maximum of viewer window, set None for automatic maximum.',
default=cfg["viewermax"])
parser.add_argument(
'-vmin', '--viewermin', type=eval, # type=int,
help='Minimum of viewer window, set None for automatic minimum.',
default=cfg["viewermin"])
parser.add_argument(
'--roi', type=eval, # type=int,
        help='Region of interest. Set None to use the whole image.',
default=cfg["roi"])
parser.add_argument(
'-so', '--show_output', action='store_true',
help='Show output data in viewer')
parser.add_argument(
'-ni', '--no_interactivity', action='store_true',
help='run in no interactivity mode, seeds must be defined')
parser.add_argument('-a', '--arg', nargs='+', type=float)
parser.add_argument(
'-ec', '--experiment_caption', type=str, # type=int,
help='Short caption of experiment. No special characters.',
default=cfg["experiment_caption"])
parser.add_argument(
'-ids', '--input_datapath_start', type=str, # type=int,
help='Start datapath for input dialog.',
default=cfg["input_datapath_start"])
parser.add_argument(
'-iaf', '--input_annotation_file', type=str, # type=int,
help='Set input json annotation file',
default=None)
parser.add_argument(
'-oaf', '--output_annotation_file', type=str, # type=int,
help='Set output json annotation file',
default=None)
parser.add_argument(
'-oi', '--lisa_operator_identifier', type=str, # type=int,
help='Identifier of Lisa operator.',
default=cfg["lisa_operator_identifier"])
parser.add_argument(
'-ss',
'--segmentation_smoothing',
action='store_true',
help='Smoothing of output segmentation',
default=cfg["segmentation_smoothing"]
)
parser.add_argument(
'-icn', '--make_icon',
action='store_true',
help='Create desktop icon on OS X and Linux',
default=False
)
parser.add_argument(
'-gsd', '--get_sample_data',
action='store_true',
help='Download sample data',
default=False
)
parser.add_argument(
'--autolisa',
action='store_true',
help='run autolisa in dir',
default=False
)
parser.add_argument(
'--save_filetype', type=str, # type=int,
help='File type of saving data. It can be pklz(default), pkl or mat',
default=cfg["save_filetype"])
args_obj = parser.parse_args()
# next two lines brings cfg from file over input parser. This is why there
# is no need to have cfg param in input arguments
args = cfg
args.update(vars(args_obj))
return args
def boltzman(x, xmid, tau):
"""
    Evaluate the Boltzmann (logistic) function with midpoint xmid and time
    constant tau over x.
"""
return 1. / (1. + np.exp(-(x - xmid) / tau))
def main(app=None, splash=None): # pragma: no cover
# import ipdb; ipdb.set_trace() # BREAKPOINT
try:
# ch, fh = \
logger_init()
cfg = lisa_config_init()
args = parser_init(cfg)
if cfg['make_icon'] is True:
import lisa_data
lisa_data.make_icon()
return
if cfg['get_sample_data'] is True:
import dataset
dataset.get_sample_data()
return
        # print(args["arg"])
oseg_argspec_keys = config.get_function_keys(
OrganSegmentation.__init__)
if args["debug"]:
import loguru
logger.remove(0)
logger.add(sys.stderr, level="DEBUG")
# ch.setLevel(logging.DEBUG)
args["debug_mode"] = True
if args["iparams"] is not None:
params = misc.obj_from_file(args["iparams"], filetype='pickle')
else:
params = config.subdict(args, oseg_argspec_keys)
logger.debug('params ' + str(params))
if args["autolisa"]:
# if splash is not None:
# splash.finish()
from . import autolisa
al = autolisa.AutoLisa()
print("datapath: ", args["datapath"])
al.run_in_paths(args["datapath"])
return
oseg = OrganSegmentation(**params)
if args["no_interactivity"]:
oseg.make_run()
# oseg.ninteractivity()
# oseg.save_outputs()
else:
            # import_gui()
from .lisaWindow import OrganSegmentationWindow
import PyQt5
import PyQt5.QtGui
from PyQt5.QtWidgets import QApplication
if app is None:
app = QApplication(sys.argv)
# Create and display the splash screen
oseg_w = OrganSegmentationWindow(oseg, qapp=app) # noqa
if splash is not None:
splash.finish(oseg_w)
# import pdb; pdb.set_trace()
sys.exit(app.exec_())
except Exception as e:
import traceback
        # import exceptionProcessing
exceptionProcessing.reportException(e)
print(traceback.format_exc())
        # raise e
if __name__ == "__main__":
main()
print("Thank you for using Lisa")
| bsd-3-clause |
BigTone2009/sms-tools | lectures/03-Fourier-properties/plots-code/anal-synth.py | 24 | 1154 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
from scipy.io.wavfile import read
from scipy.fftpack import fft, ifft
import math
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.hanning(501)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
y = DFT.dftSynth(mX, pX, w.size)*sum(w)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot(4,1,1)
plt.plot(np.arange(-hM1, hM2), x1*w, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
plt.subplot(4,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size,min(mX),max(mX)])
plt.title ('mX')
plt.subplot(4,1,3)
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0,pX.size,min(pX),max(pX)])
plt.title ('pX')
plt.subplot(4,1,4)
plt.plot(np.arange(-hM1, hM2), y, lw=1.5)
plt.axis([-hM1, hM2, min(y), max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('anal-synth.png')
plt.show()
| agpl-3.0 |
voxlol/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
pedroig/Parkinsons-Disease-Digital-Biomarker | Random_Forest/learning_utils.py | 1 | 6787 | import numpy as np
import pandas as pd
from sklearn import metrics
from imblearn.over_sampling import SMOTE
def metricsAccumulate(X, y, clf, metrics_total):
"""
Accumulates metrics results from one random forest in the undersampling ensemble.
"""
y_pred = clf.predict(X)
y_prob = clf.predict_proba(X)
y_prob = y_prob[:, 1] # positiveClass
metrics_total["Accuracy"] += metrics.accuracy_score(y, y_pred)
metrics_total["Precision"] += metrics.precision_score(y, y_pred)
metrics_total["Recall"] += metrics.recall_score(y, y_pred)
metrics_total["F1 Score"] += metrics.f1_score(y, y_pred)
metrics_total["ROC score"] += metrics.roc_auc_score(y, y_prob)
def metricsShowAccumulate(metrics_total, ensemble_size):
"""
Prints metrics results for the undersampling ensemble in the training set.
Input:
- metrics_total: dict
Dictionary with the accumulated metrics results from the ensemble.
- ensemble_size: int
"""
print("\nMetrics on Training Set")
for metric in ["Accuracy", "Precision", "Recall", "F1 Score", "ROC score"]:
print("\t{}: {}".format(metric, metrics_total[metric] / ensemble_size))
def metricsPrint(y_test, y_pred, y_prob):
"""
Input:
- y_test: numpy.ndarray
Ground truth (correct) labels.
- y_pred: numpy.ndarray
Predicted labels, as returned by a classifier.
- y_prob: numpy.ndarray
Probability estimates of the positive class.
Returns AUROC score.
"""
print("\tAccuracy:", metrics.accuracy_score(y_test, y_pred))
print("\tPrecision:", metrics.precision_score(y_test, y_pred))
print("\tRecall:", metrics.recall_score(y_test, y_pred))
print("\tF1 Score:", metrics.f1_score(y_test, y_pred))
auroc = metrics.roc_auc_score(y_test, y_prob)
print("\tROC score:", auroc)
return auroc
def metricsShowEnsemble(y_test, y_pred_total, setName, ensemble_size, threshold=0.5):
"""
Input:
- y_test: numpy.ndarray
Ground truth (correct) labels.
- y_pred_total: numpy.ndarray
Sum of the votes of all the random forests in the undersampling ensemble.
- setName: string
Name of the development set to be printed as the title.
- ensemble_size: int
The number of random forests in the undersampling ensemble.
- threshold: float
0 < threshold < 1
Returns AUROC score.
"""
print("\nMetrics on {} Set".format(setName))
y_prob = y_pred_total / ensemble_size
y_prob = y_prob[:, 1] # positiveClass
y_pred = y_prob > threshold
return metricsPrint(y_test, y_pred, y_prob)
def load_dataStandart(featuresSplitName, selectOldAge=False, dropAge=False,
balance_undersampling=False, balance_oversampling=False):
"""
Loads table with the features and applies the selected preprocessing.
Input:
- featuresSplitName: string
Name of the CSV table to be loaded.
        - selectOldAge: bool (default=False)
            Whether to select only people older than 56 years in the set.
- dropAge: bool (default=False)
Whether to use age as a feature.
- balance_undersampling: bool (default=False)
Whether to undersample the majority class in the set.
- balance_oversampling: bool (default=False)
Whether to oversample the minority class in the set.
"""
X = pd.read_csv("../data/{}.csv".format(featuresSplitName), index_col=0)
if selectOldAge:
X = X[X.age > 56]
if dropAge:
X = X.drop(["age"], axis=1)
y = X.Target
X = X.drop("Target", axis=1)
feature_names = X.axes[1]
if balance_undersampling:
X, y = generateUndersample(X, y)
elif balance_oversampling:
sm = SMOTE(ratio='minority')
X, y = sm.fit_sample(X, y)
else:
y = np.asarray(y.values, dtype=np.int8)
return X, y, feature_names
def loadFoldTables(numberOfFolds, noOutlierTable):
"""
Loads tables for all the folds used in the cross-validation.
Input:
- numberOfFolds: integer
Number of folds in which the dataset is split.
- noOutlierTable: bool
Whether to read from tables without possible outliers.
"""
folds = {}
for foldIndex in range(numberOfFolds):
table = 'fold{}'.format(foldIndex)
if noOutlierTable:
table += '_noOutliers'
folds[foldIndex] = pd.read_csv("../data/{}.csv".format(table), index_col=0)
return folds
def load_dataFolds(foldTestNumber, numberOfFolds, noOutlierTable, oldAgeTest=True, dropAge=True):
"""
Loads all the folds tables with the features.
Builds a configuration for training and test sets as specified by the foldTestNumber
Applies the selected preprocessing.
Input:
- foldTestNumber: integer
Fold index for the test set. This number also defines the folds in the training set.
- numberOfFolds: integer
Number of folds in which the dataset is split.
- noOutlierTable: bool
Whether to read from tables without possible outliers.
- selectOldAge: bool (default=False):
Whether to select only people older 56 years in the set.
- dropAge: bool (default=False)
Whether to use age as a feature.
"""
folds = loadFoldTables(numberOfFolds, noOutlierTable)
Xtest = folds[foldTestNumber]
if oldAgeTest:
Xtest = Xtest[Xtest.age > 56]
del folds[foldTestNumber]
Xtrain = pd.concat(folds.values())
Xtrain.reset_index(inplace=True, drop=True)
if dropAge:
Xtest.drop(["age"], axis=1, inplace=True)
Xtrain.drop(["age"], axis=1, inplace=True)
yTest = Xtest.Target
yTest = np.asarray(yTest.values, dtype=np.int8)
yTrain = Xtrain.Target
Xtest.drop("Target", axis=1, inplace=True)
Xtrain.drop("Target", axis=1, inplace=True)
feature_names = Xtrain.axes[1]
return Xtrain, yTrain, Xtest, yTest, feature_names
def generateUndersample(X, y):
"""
Outputs an undersampled configuration of the input table.
Input:
- X: pandas DataFrame
Table with the features.
- y: pandas Series
Labels for all samples in X.
"""
pd_indices = X[y].index
healthy_indices = X[~y].index
if len(pd_indices) > len(healthy_indices):
random_pd_indices = np.random.choice(pd_indices, len(healthy_indices), replace=False)
balanced_indices = np.append(random_pd_indices, healthy_indices)
else:
random_healthy_indices = np.random.choice(healthy_indices, len(pd_indices), replace=False)
balanced_indices = np.append(random_healthy_indices, pd_indices)
X = X.loc[balanced_indices, :]
y = y[balanced_indices]
y = np.asarray(y.values, dtype=np.int8)
return X, y
| mit |
mkawalec/masters | contrib/plot_decay/a1_exp_fit.py | 1 | 3482 | #!/usr/bin/env python2
from sys import argv
import scipy.optimize as opt
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr as pear
from pylab import savefig
from plot2 import get_a1
from surv_prob_plot import gen_fits
from colorsys import hsv_to_rgb
from matplotlib import rcParams
import matplotlib.gridspec as gridspec
from get_dims import get_dims
import params
def model_function(x, a0, a1, a2):
return a0 * np.exp(-a1 * np.exp(a2 * x))
def execute_fit():
fits = gen_fits(0.3, 0.1)
print fits[24][1.04]
rcParams['figure.figsize'] = 4.981320049813201, 4
gs = gridspec.GridSpec(5, 1)
gs.update(wspace=0)
ax1 = plt.subplot(gs[:4, 0])
ax2 = plt.subplot(gs[4, 0], sharex=ax1)
coefficients = {}
for n, domain in enumerate(sorted(fits.keys(), key=lambda x: float(x))):
if domain == 10:
continue
curr_keys = sorted(fits[domain].keys(), key=lambda x: float(x))
x = map(lambda x: float(x), curr_keys)
y = map(lambda x: fits[domain][x]['avg'][1], curr_keys)
with open('domain_' + str(domain), 'w') as f:
for i,R in enumerate(x):
f.write("%f %f\n" % (R, y[i]))
try:
popt, pcov = opt.curve_fit(model_function, np.array(x), np.array(y),
(0.5324, -0.0090589, 6.2374),
maxfev=100000)
coefficients[domain] = popt
except RuntimeError:
continue
color1 = hsv_to_rgb(float(n) / len(fits.keys()), 0.7, 0.9)
color2 = hsv_to_rgb(float(n + 1) / len(fits.keys()), 0.7, 0.9)
if int(domain)%4 == 0 and int(domain) != 20:
ax1.plot(x, y,
label='Data at %s$\pi$' % (domain),
color=color1)
ax1.plot(x,
[model_function(x_val, *popt) for x_val in x],
label='Fit at %s$\pi$' % (domain), color=color2)
if domain == 24:
values = [model_function(x_val, *popt) for x_val in x]
residuals = map(lambda x: abs(x[0] - x[1]) / x[0], zip(y, values))
ax2.plot(x, residuals, marker='o')
ax2.set_yscale('log')
ax1.legend(loc=0)
ax1.xaxis.set_visible(False)
ax2.set_xlabel('R')
ax1.set_ylabel('Value of $a_1$')
ax2.set_ylabel('Residue\nat 24$\pi$')
ax2.yaxis.tick_right()
plt.subplots_adjust(bottom=0.15)
savefig('double_exp_fit.png', dpi=600)
plot_coeffs(coefficients)
#plt.show()
def plot_coeffs(coeffs):
rcParams['figure.figsize'] = get_dims()
fig, ax1 = plt.subplots()
toplot = dict(x=[], y1=[], y2=[])
for domain in sorted(coeffs.keys(), key=lambda x: float(x)):
toplot['x'].append(float(domain))
toplot['y1'].append(coeffs[domain][1])
toplot['y2'].append(coeffs[domain][2])
ax1.plot(toplot['x'], toplot['y1'], marker='o',
color=hsv_to_rgb(0, 0.7, 0.9), label='$b_1$')
plt.subplots_adjust(bottom=0.3, left=0.20)
ax1.set_yscale('log')
ax1.set_xlabel('Domain size/$\pi$')
savefig('b1_vs_domain.png', dpi=600)
fig, ax2 = plt.subplots()
ax2.plot(toplot['x'], toplot['y2'], marker='o',
color=hsv_to_rgb(0.5, 0.7, 0.9), label='$b_2$')
ax2.set_xlabel('Domain size/$\pi$')
plt.subplots_adjust(bottom=0.3)
savefig('b2_vs_domain.png', dpi=600)
if __name__ == '__main__':
execute_fit()
| gpl-3.0 |
glenioborges/ibis | scripts/test_data_admin.py | 8 | 17250 | #! /usr/bin/env python
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import os.path as osp
from os.path import join as pjoin
from subprocess import check_call
from click import group, option
import ibis
from ibis.compat import BytesIO
from ibis.common import IbisError
from ibis.impala.tests.common import IbisTestEnv
from ibis.util import guid
import numpy as np
import pandas as pd
import pandas.util.testing as tm
ENV = IbisTestEnv()
IBIS_TEST_DATA_S3_BUCKET = 'ibis-resources'
IBIS_TEST_DATA_LOCAL_DIR = 'ibis-testing-data'
TARBALL_NAME = 'ibis-testing-data.tar.gz'
IBIS_TEST_DATA_TARBALL = 'testing/{0}'.format(TARBALL_NAME)
IBIS_TEST_AWS_KEY_ID = os.environ.get('IBIS_TEST_AWS_KEY_ID')
IBIS_TEST_AWS_SECRET = os.environ.get('IBIS_TEST_AWS_SECRET')
def make_ibis_client():
hc = ibis.hdfs_connect(host=ENV.nn_host, port=ENV.webhdfs_port,
auth_mechanism=ENV.auth_mechanism,
verify=(ENV.auth_mechanism
not in ['GSSAPI', 'LDAP']))
if ENV.auth_mechanism in ['GSSAPI', 'LDAP']:
print("Warning: ignoring invalid Certificate Authority errors")
return ibis.impala.connect(host=ENV.impala_host, port=ENV.impala_port,
auth_mechanism=ENV.auth_mechanism,
hdfs_client=hc)
def can_write_to_hdfs(con):
test_path = pjoin(ENV.test_data_dir, ibis.util.guid())
test_file = BytesIO(ibis.util.guid().encode('utf-8'))
try:
con.hdfs.put(test_path, test_file)
con.hdfs.rm(test_path)
return True
except:
return False
def can_build_udfs():
try:
check_call('which cmake', shell=True)
except:
print('Could not find cmake on PATH')
return False
try:
check_call('which make', shell=True)
except:
print('Could not find make on PATH')
return False
try:
check_call('which clang++', shell=True)
except:
print('Could not find LLVM on PATH; if IBIS_TEST_LLVM_CONFIG is set, '
'try setting PATH="$($IBIS_TEST_LLVM_CONFIG --bindir):$PATH"')
return False
return True
def is_data_loaded(con):
if not con.hdfs.exists(ENV.test_data_dir):
return False
if not con.exists_database(ENV.test_data_db):
return False
return True
def is_udf_loaded(con):
bitcode_dir = pjoin(ENV.test_data_dir, 'udf')
if con.hdfs.exists(bitcode_dir):
return True
return False
def dnload_ibis_test_data_from_s3(local_path):
url = 'https://{0}.s3.amazonaws.com/{1}'.format(
IBIS_TEST_DATA_S3_BUCKET, IBIS_TEST_DATA_TARBALL)
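    # With the module-level constants above, this resolves to
    # https://ibis-resources.s3.amazonaws.com/testing/ibis-testing-data.tar.gz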
cmd = 'cd {0} && wget -q {1} && tar -xzf {2}'.format(
local_path, url, TARBALL_NAME)
check_call(cmd, shell=True)
data_dir = pjoin(local_path, IBIS_TEST_DATA_LOCAL_DIR)
print('Downloaded {0} and unpacked it to {1}'.format(url, data_dir))
return data_dir
def upload_ibis_test_data_to_hdfs(con, data_path):
hdfs = con.hdfs
if hdfs.exists(ENV.test_data_dir):
hdfs.rmdir(ENV.test_data_dir)
hdfs.put(ENV.test_data_dir, data_path, verbose=True)
def create_test_database(con):
if con.exists_database(ENV.test_data_db):
con.drop_database(ENV.test_data_db, force=True)
con.create_database(ENV.test_data_db)
print('Created database {0}'.format(ENV.test_data_db))
def create_parquet_tables(con):
parquet_files = con.hdfs.ls(pjoin(ENV.test_data_dir, 'parquet'))
schemas = {
'functional_alltypes': ibis.schema(
[('id', 'int32'),
('bool_col', 'boolean'),
('tinyint_col', 'int8'),
('smallint_col', 'int16'),
('int_col', 'int32'),
('bigint_col', 'int64'),
('float_col', 'float'),
('double_col', 'double'),
('date_string_col', 'string'),
('string_col', 'string'),
('timestamp_col', 'timestamp'),
('year', 'int32'),
('month', 'int32')]),
'tpch_region': ibis.schema(
[('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string')])}
tables = []
for table_name in parquet_files:
print('Creating {0}'.format(table_name))
        # if no schema is given, it will be inferred
schema = schemas.get(table_name)
path = pjoin(ENV.test_data_dir, 'parquet', table_name)
table = con.parquet_file(path, schema=schema, name=table_name,
database=ENV.test_data_db, persist=True)
tables.append(table)
return tables
def create_avro_tables(con):
avro_files = con.hdfs.ls(pjoin(ENV.test_data_dir, 'avro'))
schemas = {
'tpch_region_avro': {
'type': 'record',
'name': 'a',
'fields': [
{'name': 'R_REGIONKEY', 'type': ['null', 'int']},
{'name': 'R_NAME', 'type': ['null', 'string']},
{'name': 'R_COMMENT', 'type': ['null', 'string']}]}}
tables = []
for table_name in avro_files:
print('Creating {0}'.format(table_name))
schema = schemas[table_name]
path = pjoin(ENV.test_data_dir, 'avro', table_name)
table = con.avro_file(path, schema, name=table_name,
database=ENV.test_data_db, persist=True)
tables.append(table)
return tables
def build_udfs():
print('Building UDFs')
ibis_home_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
udf_dir = pjoin(ibis_home_dir, 'testing', 'udf')
check_call('cmake . && make', shell=True, cwd=udf_dir)
def upload_udfs(con):
ibis_home_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
build_dir = pjoin(ibis_home_dir, 'testing', 'udf', 'build')
bitcode_dir = pjoin(ENV.test_data_dir, 'udf')
print('Uploading UDFs to {0}'.format(bitcode_dir))
if con.hdfs.exists(bitcode_dir):
con.hdfs.rmdir(bitcode_dir)
con.hdfs.put(bitcode_dir, build_dir, verbose=True)
def scrape_parquet_files(tmp_db, con):
to_scrape = [('tpch', x) for x in con.list_tables(database='tpch')]
to_scrape.append(('functional', 'alltypes'))
for db, tname in to_scrape:
table = con.table(tname, database=db)
new_name = '{0}_{1}'.format(db, tname)
print('Creating {0}'.format(new_name))
con.create_table(new_name, table, database=tmp_db)
def download_parquet_files(con, tmp_db_hdfs_path):
parquet_path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'parquet')
print("Downloading {0}".format(parquet_path))
con.hdfs.get(tmp_db_hdfs_path, parquet_path)
def generate_sqlite_db(con):
from sqlalchemy import create_engine
path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'ibis_testing.db')
csv_path = guid()
engine = create_engine('sqlite:///{0}'.format(path))
generate_sql_csv_sources(csv_path, con.database('ibis_testing'))
make_sqlite_testing_db(csv_path, engine)
shutil.rmtree(csv_path)
def download_avro_files(con):
avro_hdfs_path = '/test-warehouse/tpch.region_avro'
avro_local_path = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'avro')
os.mkdir(avro_local_path)
print("Downloading {0}".format(avro_hdfs_path))
con.hdfs.get(avro_hdfs_path, pjoin(avro_local_path, 'tpch_region_avro'))
def generate_csv_files():
N = 10
nfiles = 10
df = pd.DataFrame({'foo': [tm.rands(10) for _ in xrange(N)],
'bar': np.random.randn(N),
'baz': np.random.randint(0, 100, size=N)},
columns=['foo', 'bar', 'baz'])
csv_base = pjoin(IBIS_TEST_DATA_LOCAL_DIR, 'csv')
os.mkdir(csv_base)
for i in xrange(nfiles):
csv_path = pjoin(csv_base, '{0}.csv'.format(i))
print('Writing {0}'.format(csv_path))
df.to_csv(csv_path, index=False, header=False)
def copy_tarball_to_versioned_backup(bucket):
key = bucket.get_key(IBIS_TEST_DATA_TARBALL)
if key:
names = [k.name for k in bucket.list(prefix=IBIS_TEST_DATA_TARBALL)]
names.remove(IBIS_TEST_DATA_TARBALL)
# get the highest number for this key name
        last = sorted([int(name.split('.')[-1]) for name in names])[-1]
next_key = '{0}.{1}'.format(IBIS_TEST_DATA_TARBALL, last + 1)
key.copy(IBIS_TEST_DATA_S3_BUCKET, next_key)
key.delete()
assert bucket.get_key(IBIS_TEST_DATA_TARBALL) is None
_sql_tpch_tables = ['tpch_lineitem', 'tpch_customer',
'tpch_region', 'tpch_nation', 'tpch_orders']
_sql_tables = ['functional_alltypes']
def _project_tpch_lineitem(t):
return t['l_orderkey',
'l_partkey',
'l_suppkey',
'l_linenumber',
t.l_quantity.cast('double'),
t.l_extendedprice.cast('double'),
t.l_discount.cast('double'),
t.l_tax.cast('double'),
'l_returnflag',
'l_linestatus',
'l_shipdate',
'l_commitdate',
'l_receiptdate',
'l_shipinstruct',
'l_shipmode']
def _project_tpch_orders(t):
return t['o_orderkey',
'o_custkey',
'o_orderstatus',
t.o_totalprice.cast('double'),
'o_orderdate',
'o_orderpriority',
'o_clerk',
'o_shippriority']
def _project_tpch_customer(t):
return t['c_custkey',
'c_name',
'c_nationkey',
'c_phone',
'c_acctbal',
'c_mktsegment']
_projectors = {
'tpch_customer': _project_tpch_customer,
'tpch_lineitem': _project_tpch_lineitem,
'tpch_orders': _project_tpch_orders,
}
def generate_sql_csv_sources(output_path, db):
ibis.options.sql.default_limit = None
if not osp.exists(output_path):
os.mkdir(output_path)
for name in _sql_tables:
print(name)
table = db[name]
if name in _projectors:
table = _projectors[name](table)
df = table.execute()
path = osp.join(output_path, name)
df.to_csv('{0}.csv'.format(path), na_rep='\\N')
def make_sqlite_testing_db(csv_dir, con):
for name in _sql_tables:
print(name)
path = osp.join(csv_dir, '{0}.csv'.format(name))
df = pd.read_csv(path, na_values=['\\N'])
pd.io.sql.to_sql(df, name, con, chunksize=10000)
# ==========================================
@group(context_settings={'help_option_names': ['-h', '--help']})
def main():
"""Manage test data for Ibis"""
pass
@main.command()
def printenv():
"""Print current IbisTestEnv"""
print(str(ENV))
@main.command()
@option('--create-tarball', is_flag=True,
help="Create a gzipped tarball")
@option('--push-to-s3', is_flag=True,
help="Also push the tarball to s3://ibis-test-resources")
def create(create_tarball, push_to_s3):
"""Create Ibis test data"""
print(str(ENV))
con = make_ibis_client()
# verify some assumptions before proceeding
if push_to_s3 and not create_tarball:
raise IbisError(
"Must specify --create-tarball if specifying --push-to-s3")
if osp.exists(IBIS_TEST_DATA_LOCAL_DIR):
raise IbisError(
'Local dir {0} already exists; please remove it first'.format(
IBIS_TEST_DATA_LOCAL_DIR))
if not con.exists_database('tpch'):
raise IbisError('`tpch` database does not exist')
if not con.hdfs.exists('/test-warehouse/tpch.region_avro'):
raise IbisError(
'HDFS dir /test-warehouse/tpch.region_avro does not exist')
# generate tmp identifiers
tmp_db_hdfs_path = pjoin(ENV.tmp_dir, guid())
tmp_db = guid()
os.mkdir(IBIS_TEST_DATA_LOCAL_DIR)
try:
# create the tmp data locally
con.create_database(tmp_db, path=tmp_db_hdfs_path)
print('Created database {0} at {1}'.format(tmp_db, tmp_db_hdfs_path))
# create the local data set
scrape_parquet_files(tmp_db, con)
download_parquet_files(con, tmp_db_hdfs_path)
download_avro_files(con)
generate_csv_files()
generate_sqlite_db(con)
finally:
con.drop_database(tmp_db, force=True)
assert not con.hdfs.exists(tmp_db_hdfs_path)
if create_tarball:
check_call('tar -zc {0} > {1}'
.format(IBIS_TEST_DATA_LOCAL_DIR, TARBALL_NAME),
shell=True)
if push_to_s3:
import boto
s3_conn = boto.connect_s3(IBIS_TEST_AWS_KEY_ID,
IBIS_TEST_AWS_SECRET)
bucket = s3_conn.get_bucket(IBIS_TEST_DATA_S3_BUCKET)
# copy_tarball_to_versioned_backup(bucket)
key = bucket.new_key(IBIS_TEST_DATA_TARBALL)
print('Upload tarball to S3')
key.set_contents_from_filename(TARBALL_NAME, replace=True)
@main.command()
@option('--data/--no-data', default=True, help='Load (skip) ibis testing data')
@option('--udf/--no-udf', default=True, help='Build/upload (skip) test UDFs')
@option('--data-dir',
        help='Path to testing data; downloads data from S3 if unset')
@option('--overwrite', is_flag=True, help='Forces overwriting of data/UDFs')
def load(data, udf, data_dir, overwrite):
"""Load Ibis test data and build/upload UDFs"""
print(str(ENV))
con = make_ibis_client()
# validate our environment before performing possibly expensive operations
if not can_write_to_hdfs(con):
raise IbisError('Failed to write to HDFS; check your settings')
if udf and not can_build_udfs():
raise IbisError('Build environment does not support building UDFs')
# load the data files
if data:
already_loaded = is_data_loaded(con)
print('Attempting to load Ibis test data (--data)')
if already_loaded and not overwrite:
print('Data is already loaded and not overwriting; moving on')
else:
if already_loaded:
print('Data is already loaded; attempting to overwrite')
tmp_dir = tempfile.mkdtemp(prefix='__ibis_tmp_')
try:
if not data_dir:
print('Did not specify a local dir with the test data, so '
'downloading it from S3')
data_dir = dnload_ibis_test_data_from_s3(tmp_dir)
print('Uploading to HDFS')
upload_ibis_test_data_to_hdfs(con, data_dir)
print('Creating Ibis test data database')
create_test_database(con)
parquet_tables = create_parquet_tables(con)
avro_tables = create_avro_tables(con)
for table in parquet_tables + avro_tables:
print('Computing stats for {0}'.format(table.op().name))
table.compute_stats()
# sqlite database
sqlite_src = osp.join(data_dir, 'ibis_testing.db')
shutil.copy(sqlite_src, '.')
finally:
shutil.rmtree(tmp_dir)
else:
print('Skipping Ibis test data load (--no-data)')
# build and upload the UDFs
if udf:
already_loaded = is_udf_loaded(con)
print('Attempting to build and load test UDFs')
if already_loaded and not overwrite:
print('UDFs already loaded and not overwriting; moving on')
else:
if already_loaded:
print('UDFs already loaded; attempting to overwrite')
print('Building UDFs')
build_udfs()
print('Uploading UDFs')
upload_udfs(con)
else:
print('Skipping UDF build/load (--no-udf)')
@main.command()
@option('--test-data', is_flag=True,
help='Cleanup Ibis test data, test database, and also the test UDFs '
'if they are stored in the test data directory/database')
@option('--udfs', is_flag=True, help='Cleanup Ibis test UDFs only')
@option('--tmp-data', is_flag=True,
help='Cleanup Ibis temporary HDFS directory')
@option('--tmp-db', is_flag=True, help='Cleanup Ibis temporary database')
def cleanup(test_data, udfs, tmp_data, tmp_db):
"""Cleanup Ibis test data and UDFs"""
print(str(ENV))
con = make_ibis_client()
if udfs:
# this comes before test_data bc the latter clobbers this too
con.hdfs.rmdir(pjoin(ENV.test_data_dir, 'udf'))
if test_data:
con.drop_database(ENV.test_data_db, force=True)
con.hdfs.rmdir(ENV.test_data_dir)
if tmp_data:
con.hdfs.rmdir(ENV.tmp_dir)
if tmp_db:
con.drop_database(ENV.tmp_db, force=True)
if __name__ == '__main__':
main()
| apache-2.0 |
PredictiveScienceLab/cluster-opt-bgo | pydes/_core.py | 2 | 10321 | """
Global Optimization of Expensive Functions.
Author:
Ilias Bilionis
Date:
10/15/2014
01/29/2015
"""
__all__ = ['expected_improvement',
'fb_expected_improvement',
'expected_information_gain',
'minimize', 'maximize',
'plot_summary', 'plot_summary_2d']
import GPy
import GPy.inference.mcmc
from GPy.inference.mcmc import HMC
import numpy as np
import math
import scipy
import scipy.stats as stats
from scipy.integrate import quad
#from choldate import choldowndate, cholupdate
from statsmodels.sandbox.distributions.multivariate import mvnormcdf
import math
def remove(mu, S, i):
"""
Remove i element from mu and S.
"""
mu_ni = np.hstack([mu[:i], mu[i+1:]])
S_nini = np.array(np.bmat([[S[:i, :i], S[:i, i+1:]],
[S[i+1:, :i], S[i+1:, i+1:]]]))
return mu_ni, S_nini
def maxpdf(x, mu, S):
s = np.zeros(x.shape[0])
d = mu.shape[0]
for i in xrange(d):
mu_i = mu[i]
S_ii = S[i, i]
mu_ni, S_nini = remove(mu, S, i)
S_ini = np.array(np.bmat([[S[:i, i], S[i+1:, i]]]))
mu_nii = mu_ni[:, None] + np.dot(S_ini.T, x[None, :] - mu_i) / S_ii
S_ninii = S_nini - np.dot(S_ini, S_ini.T) / S_ii
        phi_i = stats.norm.pdf(x, loc=mu_i, scale=np.sqrt(S_ii))
Phi_i = np.array([mvnormcdf(x[j], mu_nii[:, j], S_ninii)
for j in xrange(x.shape[0])])
s += phi_i * Phi_i
return s
def expected_improvement(X_design, model, mode='min'):
"""
Compute the Expected Improvement criterion at ``x``.
"""
y = model.Y.flatten()
m_s, v_s = model.predict(X_design)[:2]
m_s = m_s.flatten()
v_s = v_s.flatten()
s_s = np.sqrt(v_s)
if mode == 'min':
m_n = np.min(y)
u = (m_n - m_s) / s_s
elif mode == 'max':
m_n = np.max(y)
u = (m_s - m_n) / s_s
else:
raise NotImplementedError('I do not know what to do with mode %s' %mode)
ei = s_s * (u * stats.norm.cdf(u) + stats.norm.pdf(u))
return ei
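# Illustrative usage sketch (not part of the original module): how
# `expected_improvement` is typically evaluated on a dense design grid to pick the
# next point to sample. The toy objective, the [0, 1] bounds and the RBF kernel are
# assumptions made purely for this example.
def _expected_improvement_usage_sketch():
    X = np.random.uniform(0., 1., size=(5, 1))
    Y = np.sin(10. * X) + 0.1 * np.random.randn(5, 1)
    model = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
    model.optimize()
    X_design = np.linspace(0., 1., 200)[:, None]
    ei = expected_improvement(X_design, model, mode='min')
    # the maximizer of the EI criterion is the suggested next evaluation point
    return X_design[np.argmax(ei)]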
def fb_expected_improvement(X_design, model, mode='min', stepsize=1e-2,
num_samples=100):
"""
Compute the fully Bayesian expected improvement criterion.
"""
model.rbf.variance.set_prior(GPy.priors.LogGaussian(0., 1.))
model.rbf.lengthscale.set_prior(GPy.priors.LogGaussian(0., 0.1))
mcmc = HMC(model, stepsize=stepsize)
params = mcmc.sample(num_samples=num_samples)
ei_all = []
for i in xrange(params.shape[0]):
model.rbf.variance = params[i, 0]
model.rbf.lengthscale = params[i, 1]
ei = expected_improvement(X_design, model, mode=mode)
ei_all.append(ei)
ei_all = np.array(ei_all)
ei_fb = ei_all.mean(axis=0)
return ei_fb
def min_qoi(X_design, f):
"""
A QoI that corresponds to the min of the function.
"""
return np.argmin(f, axis=0)
def kl_divergence(g1, g2):
"""
Compute the KL divergence.
"""
    f = lambda x: g1.evaluate([[x]]) * np.log(g1.evaluate([[x]]) / g2.evaluate([[x]]))
return quad(f, 0, 6)
def expected_information_gain(X_design, model, num_Omegas=1000,
num_y=100,
qoi=min_qoi,
qoi_bins=None,
qoi_num_bins=20):
"""
Compute the expected information gain criterion at ``x``.
"""
    import matplotlib.pyplot as plt
    # choldate supplies the rank-one Cholesky up/downdates used below; the
    # module-level import was left commented out, so import it lazily here.
    from choldate import choldowndate, cholupdate
m_d, K_d = model.predict(X_design, full_cov=True)[:2]
U_d = scipy.linalg.cholesky(K_d, lower=False)
Omegas = np.random.randn(X_design.shape[0], num_Omegas)
delta_y_i = np.random.randn(num_y)
# Find the histogram of Q the current data
S_d = m_d + np.dot(U_d.T, Omegas)
Q_d = qoi(X_design, S_d)
tmp = stats.itemfreq(Q_d)
yy = model.posterior_samples(X_design, 10)
plt.plot(X_design, yy, 'm', linewidth=2)
plt.savefig('examples/samples.png')
plt.clf()
p_d = np.zeros((X_design.shape[0],))
p_d[np.array(tmp[:, 0], dtype='int')] = tmp[:, 1] / np.sum(tmp[:, 1])
if qoi_bins is None and qoi is min_qoi:
#qoi_bins = np.linspace(np.min(Q_d), np.max(Q_d), qoi_num_bins)[None, :]
qoi_bins = np.linspace(X_design[0, 0], X_design[-1, 0], qoi_num_bins)[None, :]
H_d, e_d = np.histogramdd(Q_d, normed=True, bins=qoi_bins)
delta_e_d = e_d[0][1] - e_d[0][0]
#p_d = H_d * delta_e_d
plt.plot(X_design, p_d)
plt.plot(X_design, m_d)
plt.plot(model.X, model.Y, 'ro', markersize=10)
plt.hist(X_design[Q_d, 0], normed=True, alpha=0.5)
plt.savefig('examples/kde_Q.png')
plt.clf()
print 'Entropy:', stats.entropy(p_d)
G = np.zeros((X_design.shape[0],))
p_d += 1e-16
for i in xrange(X_design.shape[0]):
u_di = K_d[:, i] / math.sqrt(K_d[i, i])
u_di = u_di[:, None]
#K_dd = K_d - np.dot(u_di, u_di.T)
#K_dd += np.eye(K_d.shape[0]) * 1e-6
choldowndate(U_d, u_di.flatten().copy())
#U_d = scipy.linalg.cholesky(K_dd, lower=False)
# Pick a value for y:
Omegas = np.random.randn(X_design.shape[0], num_Omegas)
delta_y_i = np.random.randn(num_y)
m_dgi = m_d + delta_y_i * u_di
S_dgi = m_dgi[:, :, None] + np.dot(U_d.T, Omegas)[:, None, :]
#for j in xrange(num_y):
# print S_dgi[:, j, :]
# plt.plot(X_design, S_dgi[:, j, :], 'm', linewidth=0.5)
# plt.plot(model.X, model.likelihood.Y, 'ro', markersize=10)
# plt.savefig('examples/ig_S_' + str(i).zfill(2) + '_' + str(j).zfill(2) + '.png')
# plt.clf()
Q_dgi = qoi(X_design, S_dgi)
#print Q_dgi
#quit()
p_d_i = np.zeros((num_y, X_design.shape[0]))
for j in xrange(num_y):
tmp = stats.itemfreq(Q_dgi[j, :])
p_d_i[j, np.array(tmp[:, 0], dtype='int')] = tmp[:, 1] / np.sum(tmp[:, 1])
p_d_i += 1e-16
G[i] = np.mean([stats.entropy(p_d_i[j, :], p_d) for j in xrange(num_y)])
#G[i] = np.mean([-stats.entropy(p_d_i[j, :]) for j in xrange(num_y)])
#plt.plot(X_design, S_dgi[:, :, 0], 'm', linewidth=0.5)
#plt.plot(X_design, m_d, 'r', linewidth=2)
plt.plot(model.X, np.zeros((model.X.shape[0], 1)), 'ro', markersize=10)
plt.plot(X_design, np.mean(p_d_i, axis=0), 'g', linewidth=2)
plt.savefig('examples/ig_S_' + str(i).zfill(2) + '.png')
plt.clf()
print X_design[i, 0], G[i]
cholupdate(U_d, u_di.flatten().copy())
plt.plot(X_design, G)
plt.savefig('examples/ig_KL.png')
plt.clf()
return G
def plot_summary(f, X_design, model, prefix, G, Gamma_name):
"""
Plot a summary of the current iteration.
"""
import matplotlib.pyplot as plt
X = model.X
y = model.Y
m_s, k_s = model.predict(X_design, full_cov=True)
m_05, m_95 = model.predict_quantiles(X_design)
fig, ax1 = plt.subplots()
ax1.plot(X_design, f(X_design), 'b', linewidth=2)
ax1.plot(X, y, 'go', linewidth=2, markersize=10, markeredgewidth=2)
ax1.plot(X_design, m_s, 'r--', linewidth=2)
ax1.fill_between(X_design.flatten(), m_05.flatten(), m_95.flatten(),
color='grey', alpha=0.5)
ax1.set_ylabel('$f(x)$', fontsize=16)
ax2 = ax1.twinx()
ax2.plot(X_design, G, 'g', linewidth=2)
ax2.set_ylabel('$%s(x)$' % Gamma_name, fontsize=16, color='g')
#ax2.set_ylim([0., 3.])
plt.setp(ax2.get_yticklabels(), color='g')
png_file = prefix + '.png'
print 'Writing:', png_file
plt.savefig(png_file)
plt.clf()
def plot_summary_2d(f, X_design, model, prefix, G, Gamma_name):
"""
Plot a summary of the current iteration.
"""
import matplotlib.pyplot as plt
    n = int(np.sqrt(X_design.shape[0]))
X1, X2 = (X_design[:, i].reshape((n, n)) for i in range(2))
GG = G.reshape((n, n))
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, GG)
fig.colorbar(cax)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_' + Gamma_name + '.png')
plt.clf()
X = model.X
m_s, k_s = model.predict(X_design)
M_s = m_s.reshape((n, n))
S_s = np.sqrt(k_s.reshape((n, n)))
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, M_s)
fig.colorbar(cax)
ax.plot(X[:, 0], X[:, 1], 'k.', markersize=10)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_mean.png')
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, S_s)
fig.colorbar(cax)
ax.plot(X[:, 0], X[:, 1], 'k.', markersize=10)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_std.png')
plt.clf()
def minimize(f, X_init, X_design, prefix="minimize", Gamma=expected_improvement,
Gamma_name='EI', max_it=10, tol=1e-1, callback=None):
"""
Optimize f using a limited number of evaluations.
"""
X = X_init
y = np.array([f(X[i, :]) for i in xrange(X.shape[0])])
k = GPy.kern.RBF(X.shape[1], ARD=True)
for count in xrange(max_it):
model = GPy.models.GPRegression(X, y, k)
model.Gaussian_noise.variance.constrain_fixed(1e-6)
model.optimize()
print str(model)
G = Gamma(X_design, model)
if callback is not None:
callback(f, X_design, model,
prefix + '_' + str(count).zfill(2), G, Gamma_name)
i = np.argmax(G)
if G[i] < tol:
print '*** converged'
break
print 'I am adding:', X_design[i:(i+1), :]
print 'which has a G of', G[i]
X = np.vstack([X, X_design[i:(i+1), :]])
y = np.vstack([y, f(X_design[i, :])])
print 'it =', count+1, ', min =', np.min(y), ' arg min =', X[np.argmin(y), :]
return X, y
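# Illustrative sketch (not part of the original module) of a typical `minimize`
# call: a cheap 1-D toy objective, a few random initial evaluations and a dense
# candidate grid. The objective, the [-2, 2] bounds and the small budget are
# assumptions for the example only; the objective returns a length-1 array so that
# the stacked observations stay two-dimensional for GPy.
def _minimize_usage_sketch():
    f = lambda x: np.array([np.sin(5. * x[0]) + 0.5 * x[0] ** 2])
    X_init = np.random.uniform(-2., 2., size=(3, 1))
    X_design = np.linspace(-2., 2., 200)[:, None]
    X, y = minimize(f, X_init, X_design, max_it=5, tol=1e-2)
    # best point found within the evaluation budget
    return X[np.argmin(y)]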
def maximize(f, X_init, X_design, prefix='maximize', Gamma=expected_improvement,
Gamma_name='EI', max_it=10, tol=1e-1, callback=None):
"""
Maximize the function ``f``.
"""
    f_minus = lambda x: -f(x)
    return minimize(f_minus, X_init, X_design, prefix=prefix, Gamma=Gamma,
                    Gamma_name=Gamma_name, max_it=max_it, tol=tol,
                    callback=callback)
| mit |
louispotok/pandas | pandas/tests/plotting/test_deprecated.py | 2 | 1528 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_scatter_plot_legacy(self):
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |
UNR-AERIAL/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
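# Illustrative sketch (not part of the original module) of the KRR/SVR contrast
# described in the class docstring: both fit the same form of kernel model, but
# KernelRidge is solved in closed form with squared loss and keeps a dual
# coefficient for every training sample, while SVR optimizes an epsilon-insensitive
# loss and keeps only the support vectors. The toy data below is an assumption made
# only for this comparison.
def _kernel_ridge_vs_svr_sketch():
    from sklearn.svm import SVR
    rng = np.random.RandomState(0)
    X = 5 * rng.rand(100, 1)
    y = np.sin(X).ravel() + 0.1 * rng.randn(100)
    krr = KernelRidge(kernel='rbf', gamma=0.5, alpha=1.0).fit(X, y)
    svr = SVR(kernel='rbf', gamma=0.5, C=1.0, epsilon=0.1).fit(X, y)
    # number of training samples carrying dual weight in each model
    return krr.dual_coef_.shape[0], svr.support_vectors_.shape[0]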
| bsd-3-clause |
hrashk/sympy | sympy/external/importtools.py | 11 | 7107 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
return eval(os.getenv('SYMPY_DEBUG', 'False'))
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
    warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
                        else (True if warn_old_version is None
                              else warn_old_version))
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/api/test_api.py | 7 | 7858 | # -*- coding: utf-8 -*-
from warnings import catch_warnings
import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
class Base(object):
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted([f for f in dir(namespace) if not f.startswith('_')])
if ignored is not None:
result = sorted(list(set(result) - set(ignored)))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ['tests', 'locale', 'conftest']
# top-level sub-packages
lib = ['api', 'compat', 'core', 'errors', 'pandas',
'plotting', 'test', 'testing', 'tools', 'tseries',
'util', 'options', 'io']
# these are already deprecated; awaiting removal
deprecated_modules = ['stats', 'datetools', 'parser',
'json', 'lib', 'tslib']
# misc
misc = ['IndexSlice', 'NaT']
# top-level classes
classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset',
'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index',
'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex',
'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
'Series', 'SparseArray', 'SparseDataFrame',
'SparseSeries', 'TimeGrouper', 'Timedelta',
'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
# these are already deprecated; awaiting removal
deprecated_classes = ['WidePanel', 'Panel4D',
'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
# external modules exposed in pandas namespace
modules = ['np', 'datetime']
# top-level functions
funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
'date_range', 'interval_range', 'eval',
'factorize', 'get_dummies',
'infer_freq', 'isnull', 'lreshape',
'melt', 'notnull', 'offsets',
'merge', 'merge_ordered', 'merge_asof',
'period_range',
'pivot', 'pivot_table', 'qcut',
'show_versions', 'timedelta_range', 'unique',
'value_counts', 'wide_to_long']
# top-level option funcs
funcs_option = ['reset_option', 'describe_option', 'get_option',
'option_context', 'set_option',
'set_eng_float_format']
# top-level read_* funcs
funcs_read = ['read_clipboard', 'read_csv', 'read_excel', 'read_fwf',
'read_gbq', 'read_hdf', 'read_html', 'read_json',
'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
'read_sql_query', 'read_sql_table', 'read_stata',
'read_table', 'read_feather']
# top-level to_* funcs
funcs_to = ['to_datetime', 'to_msgpack',
'to_numeric', 'to_pickle', 'to_timedelta']
# these are already deprecated; awaiting removal
deprecated_funcs = ['ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar',
'ewmvol', 'expanding_apply', 'expanding_corr',
'expanding_count', 'expanding_cov', 'expanding_kurt',
'expanding_max', 'expanding_mean', 'expanding_median',
'expanding_min', 'expanding_quantile',
'expanding_skew', 'expanding_std', 'expanding_sum',
'expanding_var', 'rolling_apply',
'rolling_corr', 'rolling_count', 'rolling_cov',
'rolling_kurt', 'rolling_max', 'rolling_mean',
'rolling_median', 'rolling_min', 'rolling_quantile',
'rolling_skew', 'rolling_std', 'rolling_sum',
'rolling_var', 'rolling_window', 'ordered_merge',
'pnow', 'match', 'groupby', 'get_store',
'plot_params', 'scatter_matrix']
def test_api(self):
self.check(pd,
self.lib + self.misc +
self.modules + self.deprecated_modules +
self.classes + self.deprecated_classes +
self.deprecated_classes_in_future +
self.funcs + self.funcs_option +
self.funcs_read + self.funcs_to +
self.deprecated_funcs,
self.ignored)
class TestApi(Base):
allowed = ['types']
def test_api(self):
self.check(api, self.allowed)
class TestTesting(Base):
funcs = ['assert_frame_equal', 'assert_series_equal',
'assert_index_equal']
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
class TestDatetoolsDeprecation(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.to_datetime('2016-01-01')
def test_deprecation_access_obj(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.monthEnd
class TestTopLevelDeprecations(object):
# top-level API deprecations
# GH 13790
def test_pnow(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.pnow(freq='M')
def test_term(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Term('index>=date')
def test_expr(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Expr('2>1')
def test_match(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.match([1, 2, 3], [1])
def test_groupby(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
# GH 15940
def test_get_store(self):
pytest.importorskip('tables')
with tm.ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s = pd.get_store(path)
s.close()
class TestJson(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.json.dumps([])
class TestParser(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.parser.na_values
class TestLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.lib.infer_dtype('foo')
class TestTSLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.tslib.Timestamp('20160101')
class TestTypes(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
from pandas.types.concat import union_categoricals
c1 = pd.Categorical(list('aabc'))
c2 = pd.Categorical(list('abcd'))
union_categoricals(
[c1, c2],
sort_categories=True,
ignore_order=True)
| mit |
nburn42/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mfjb/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
jonasrothfuss/DeepEpisodicMemory | data_postp/classification.py | 1 | 3526 | import numpy as np
import pandas as pd
import utils.io_handler as io_handler
import os, collections, scipy, itertools, multiprocessing, shutil
import scipy, pickle, json
import sklearn, sklearn.ensemble
import csv
from datetime import datetime
from pprint import pprint
from tensorflow.python.platform import flags
from data_postp.matching import train_and_dump_classifier
from data_postp.similarity_computations import df_col_to_matrix
PICKLE_FILE_TEST = '/common/homes/students/rothfuss/Documents/selected_trainings/8_20bn_gdl_optical_flow/validate/metadata_and_hidden_rep_df_08-14-17_16-17-12_test.pickle'
PICKLE_FILE_VALID = '/common/homes/students/rothfuss/Documents/selected_trainings/8_20bn_gdl_optical_flow/validate/metadata_and_hidden_rep_df_08-09-17_17-00-24_valid.pickle'
def generate_test_labels_csv(valid_df, test_df, dump_path, n_components=100):
#train classifier on valid_df
classifier, pca, _, _ = train_and_dump_classifier(valid_df, class_column="category",
classifier=sklearn.linear_model.LogisticRegression(n_jobs=-1),
n_components=n_components, train_split_ratio=0.8)
#PCA transform hidden_reps of test_df
transformed_vectors_as_matrix = pca.transform(df_col_to_matrix(test_df['hidden_repr']))
test_df['hidden_repr'] = np.split(transformed_vectors_as_matrix, transformed_vectors_as_matrix.shape[0])
X_test = df_col_to_matrix(test_df['hidden_repr'])
#predict labels
Y = classifier.predict(X_test)
#generate csv
result_df = pd.DataFrame(Y, index=test_df['id'])
result_df.to_csv(dump_path, sep=';')
def calculate_accuracy(valid_df, n_components=[200], n_folds=5,
classifier=sklearn.linear_model.LogisticRegression(n_jobs=-1), dump_file_name=None):
n_components = [n_components] if type(n_components) is not list else n_components
string_to_dump = ''
for n_comp in n_components:
print(type(n_comp))
acc_array, top5_acc_array = [], []
print("Training started with %i PCA components and %i folds" %(n_comp, n_folds))
for i in range(n_folds):
_, _, acc, top5_acc = train_and_dump_classifier(valid_df, class_column="category", classifier=classifier,
n_components=n_comp, train_split_ratio=0.8)
acc_array.append(acc)
top5_acc_array.append(top5_acc)
mean_acc, mean_top5_acc = np.mean(acc_array), np.mean(top5_acc_array)
        summary_str = "[%s, %i PCA, %i folds] acc: %.4f top5_acc: %.4f" % (
            type(classifier).__name__, n_comp, n_folds, mean_acc, mean_top5_acc)
print(summary_str)
        string_to_dump += summary_str + '\n'
if dump_file_name:
with open(dump_file_name, 'w') as file:
file.write(string_to_dump)
def main():
valid_df, test_df = pd.read_pickle(PICKLE_FILE_VALID), pd.read_pickle(PICKLE_FILE_TEST)
dump_path = '/common/homes/students/rothfuss/Documents/selected_trainings/8_20bn_gdl_optical_flow/validate/test_labels.csv'
#generate_test_labels_csv(valid_df, test_df, dump_path, n_components=200)
classifier_analysis_dump_file = '/common/homes/students/rothfuss/Documents/selected_trainings/8_20bn_gdl_optical_flow/validate/classifier_analysis/classifier_analysis_logistic.txt'
calculate_accuracy(valid_df, n_components=[100, 150, 200, 250, 300, 400], n_folds=1, dump_file_name=classifier_analysis_dump_file)
if __name__ == "__main__":
main() | mit |
phoebe-project/phoebe2-docs | development/tutorials/eclipse.py | 2 | 2531 | #!/usr/bin/env python
# coding: utf-8
# Eclipse Detection
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new Bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
phoebe.devel_on() # DEVELOPER MODE REQUIRED FOR VISIBLE_PARTIAL - DON'T USE FOR SCIENCE
logger = phoebe.logger()
b = phoebe.default_binary()
# Let's just compute the mesh at a single time-point that we know should be during egress.
# In[3]:
b.add_dataset('mesh', times=[0.05], columns=['visibilities'])
# Native
# -------------------
# The 'native' eclipse method computes what percentage (by area) of each triangle is visible at the current time. It also determines the centroid of the visible portion of each triangle.
#
# Physical quantities (temperatures, intensities, velocities, etc) are computed at the vertices of each triangle, and this centroid is then used to determine the average quantity across the visible portion of the triangle (by assuming a linear gradient across the triangle).
#
# Let's plot the visibilities (ratio of the area that is visible) as the color scale, with red being completely hidden and green being completely visible.
# In[4]:
b.run_compute(eclipse_method='native')
# In[5]:
afig, mplfig = b.plot(component='primary', fc='visibilities', xlim=(-0.5, 0.25), ylim=(-0.4, 0.4), show=True)
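# The short cell below is an illustrative, library-agnostic sketch (not part of the
# original tutorial) of what these fractional visibilities mean for integration:
# each triangle contributes its quantity weighted by the visible fraction of its
# area. The toy per-triangle arrays are assumptions made only for the example.
# In[ ]:
areas = np.array([1.0, 1.0, 1.0, 1.0])          # triangle areas
visibilities = np.array([1.0, 0.6, 0.25, 0.0])  # visible fraction of each area
intensities = np.array([0.9, 1.0, 1.1, 1.2])    # per-triangle quantity
print(np.sum(intensities * areas * visibilities))  # visibility-weighted integral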
# Visible Partial
# -----------
# The 'visible partial' eclipse method simply determines which triangles are hidden, which are visible, and which are partially visible. It then assigns a visibility of 0.5 to any partially visible triangles - meaning they will contribute half of their intensities when integrated (assuming that half of the area is visible). There are no longer any centroids - values are still computed at the vertices but are then averaged to be at the geometric center of EACH triangle.
#
# Again, let's plot the visibilities (ratio of the area that is visible) as the color scale, with red being completely hidden and green being completely visible.
# In[6]:
b.run_compute(eclipse_method='visible_partial')
# In[7]:
afig, mplfig = b.plot(component='primary', fc='visibilities', xlim=(-0.5, 0.25), ylim=(-0.4, 0.4), show=True)
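# A matching sketch (again illustrative only, not PHOEBE internals): under the
# 'visible partial' rule every partially visible triangle is weighted by a fixed
# 0.5 rather than by its true visible fraction, so the integrated quantity differs
# slightly from the 'native' result above.
# In[ ]:
vis = np.array([1.0, 0.6, 0.25, 0.0])
partial = np.where((vis > 0) & (vis < 1), 0.5, vis)
print(np.sum(np.array([0.9, 1.0, 1.1, 1.2]) * partial))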
# In[ ]:
| gpl-3.0 |
mwv/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
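# Illustrative sketch (not part of the original example): the reduction described
# in the docstring can also be finished by hand, by clustering the subcluster
# centroids found with ``n_clusters=None`` using any global algorithm.
# AgglomerativeClustering is used here purely as an example choice.
from sklearn.cluster import AgglomerativeClustering
subcluster_centers = birch_models[0].subcluster_centers_
global_labels = AgglomerativeClustering(n_clusters=100).fit_predict(subcluster_centers)
print("Reduced %d subclusters to %d global clusters"
      % (len(subcluster_centers), len(np.unique(global_labels))))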
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
pvlib/pvlib-python | docs/examples/plot_singlediode.py | 4 | 4241 | """
Calculating a module's IV curves
================================
Examples of modeling IV curves using a single-diode circuit equivalent model.
"""
# %%
# Calculating a module IV curve for certain operating conditions is a two-step
# process. Multiple methods exist for both parts of the process. Here we use
# the De Soto model [1]_ to calculate the electrical parameters for an IV
# curve at a certain irradiance and temperature using the module's
# base characteristics at reference conditions. Those parameters are then used
# to calculate the module's IV curve by solving the single-diode equation using
# the Lambert W method.
#
# The single-diode equation is a circuit-equivalent model of a PV
# cell and has five electrical parameters that depend on the operating
# conditions. For more details on the single-diode equation and the five
# parameters, see the `PVPMC single diode page
# <https://pvpmc.sandia.gov/modeling-steps/2-dc-module-iv/diode-equivalent-circuit-models/>`_.
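#
# Written out in its usual implicit form (using the same symbols as the pvlib parameters below), the single-diode equation is
#
# I = I_L - I_0 * (exp((V + I*R_s) / nNsVth) - 1) - (V + I*R_s) / R_sh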
#
# References
# ----------
# .. [1] W. De Soto et al., "Improvement and validation of a model for
# photovoltaic array performance", Solar Energy, vol 80, pp. 78-88, 2006.
#
# Calculating IV Curves
# -----------------------
# This example uses :py:meth:`pvlib.pvsystem.calcparams_desoto` to calculate
# the 5 electrical parameters needed to solve the single-diode equation.
# :py:meth:`pvlib.pvsystem.singlediode` is then used to generate the IV curves.
from pvlib import pvsystem
import pandas as pd
import matplotlib.pyplot as plt
# Example module parameters for the Canadian Solar CS5P-220M:
parameters = {
'Name': 'Canadian Solar CS5P-220M',
'BIPV': 'N',
'Date': '10/5/2009',
'T_NOCT': 42.4,
'A_c': 1.7,
'N_s': 96,
'I_sc_ref': 5.1,
'V_oc_ref': 59.4,
'I_mp_ref': 4.69,
'V_mp_ref': 46.9,
'alpha_sc': 0.004539,
'beta_oc': -0.22216,
'a_ref': 2.6373,
'I_L_ref': 5.114,
'I_o_ref': 8.196e-10,
'R_s': 1.065,
'R_sh_ref': 381.68,
'Adjust': 8.7,
'gamma_r': -0.476,
'Version': 'MM106',
'PTC': 200.1,
'Technology': 'Mono-c-Si',
}
cases = [
(1000, 55),
(800, 55),
(600, 55),
(400, 25),
(400, 40),
(400, 55)
]
conditions = pd.DataFrame(cases, columns=['Geff', 'Tcell'])
# adjust the reference parameters according to the operating
# conditions using the De Soto model:
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
conditions['Geff'],
conditions['Tcell'],
alpha_sc=parameters['alpha_sc'],
a_ref=parameters['a_ref'],
I_L_ref=parameters['I_L_ref'],
I_o_ref=parameters['I_o_ref'],
R_sh_ref=parameters['R_sh_ref'],
R_s=parameters['R_s'],
EgRef=1.121,
dEgdT=-0.0002677
)
# plug the parameters into the SDE and solve for IV curves:
curve_info = pvsystem.singlediode(
photocurrent=IL,
saturation_current=I0,
resistance_series=Rs,
resistance_shunt=Rsh,
nNsVth=nNsVth,
ivcurve_pnts=100,
method='lambertw'
)
# plot the calculated curves:
plt.figure()
for i, case in conditions.iterrows():
label = (
"$G_{eff}$ " + f"{case['Geff']} $W/m^2$\n"
"$T_{cell}$ " + f"{case['Tcell']} $C$"
)
plt.plot(curve_info['v'][i], curve_info['i'][i], label=label)
v_mp = curve_info['v_mp'][i]
i_mp = curve_info['i_mp'][i]
# mark the MPP
plt.plot([v_mp], [i_mp], ls='', marker='o', c='k')
plt.legend(loc=(1.0, 0))
plt.xlabel('Module voltage [V]')
plt.ylabel('Module current [A]')
plt.title(parameters['Name'])
plt.show()
plt.gcf().set_tight_layout(True)
# draw trend arrows
def draw_arrow(ax, label, x0, y0, rotation, size, direction):
style = direction + 'arrow'
bbox_props = dict(boxstyle=style, fc=(0.8, 0.9, 0.9), ec="b", lw=1)
t = ax.text(x0, y0, label, ha="left", va="bottom", rotation=rotation,
size=size, bbox=bbox_props, zorder=-1)
bb = t.get_bbox_patch()
bb.set_boxstyle(style, pad=0.6)
ax = plt.gca()
draw_arrow(ax, 'Irradiance', 20, 2.5, 90, 15, 'r')
draw_arrow(ax, 'Temperature', 35, 1, 0, 15, 'l')
print(pd.DataFrame({
'i_sc': curve_info['i_sc'],
'v_oc': curve_info['v_oc'],
'i_mp': curve_info['i_mp'],
'v_mp': curve_info['v_mp'],
'p_mp': curve_info['p_mp'],
}))
| bsd-3-clause |
xzh86/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
pavelchristof/gomoku-ai | tensorflow/examples/get_started/regression/imports85.py | 8 | 3495 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
header = collections.OrderedDict([
("symboling", np.int32),
("normalized-losses", np.float32),
("make", str),
("fuel-type", str),
("aspiration", str),
("num-of-doors", str),
("body-style", str),
("drive-wheels", str),
("engine-location", str),
("wheel-base", np.float32),
("length", np.float32),
("width", np.float32),
("height", np.float32),
("curb-weight", np.float32),
("engine-type", str),
("num-of-cylinders", str),
("engine-size", np.float32),
("fuel-system", str),
("bore", np.float32),
("stroke", np.float32),
("compression-ratio", np.float32),
("horsepower", np.float32),
("peak-rpm", np.float32),
("city-mpg", np.float32),
("highway-mpg", np.float32),
("price", np.float32)
]) # pyformat: disable
def raw():
"""Get the imports85 data and load it as a pd.DataFrame."""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data" # pylint: disable=line-too-long
# Download and cache the data.
path = tf.contrib.keras.utils.get_file(url.split("/")[-1], url)
# Load the CSV data into a pandas dataframe.
df = pd.read_csv(path, names=header.keys(), dtype=header, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Returns the imports85 shuffled and split into train and test subsets.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw()
# Delete rows with unknowns
data = data.dropna()
# Fix the numpy random seed (the randomized train/test split below uses `random_state=seed`)
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
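# Example usage (a minimal sketch; downloading the CSV requires network access to the UCI host):
# (x_train, y_train), (x_test, y_test) = load_data(train_fraction=0.7, seed=0)
# print(len(x_train), len(x_test))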
| apache-2.0 |
JamesRaynor67/mptcp_with_machine_learning | machineLearning/ver_0.10_q-learning/log_analyzer.py | 2 | 6896 | import csv
import matplotlib.pyplot as plt
import sys
def analyze_application(file_path):
record = []
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
next(spamreader)
total_psize = 0
for row in spamreader:
if int(row[1]) == 1: # not send record
timestamp = int(row[0])/10e8
total_psize += int(row[7])
record.append([timestamp, total_psize])
record.sort(key=lambda ele:ele[0])
x, y = [], []
for pair in record:
x.append(pair[0])
y.append(pair[1])
sent_packet_size, = plt.plot(x, y, 'go')
plt.legend([sent_packet_size], ['sent packet size'], loc='upper left')
plt.title('Time-Sent packet size')
plt.xlabel('Time / s', fontsize = 14, color = 'black')
plt.ylabel('Sent packet size / Byte', fontsize = 14, color = 'black')
print 'server send total: ', y[-1], ' Bytes'
def analyze_client_end_node(file_path):
record = []
# '/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_client'
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
next(spamreader)
for row in spamreader:
if int(row[1]) == 0: # not send record
timestamp = int(row[0])/10e8
subflowId = int(row[3])
seqnum = int(row[4])
record.append([timestamp, subflowId, seqnum])
record.sort(key=lambda ele:ele[0])
x, y = [[],[]], [[],[]]
for row in record:
# subflow id is from 0 to n-1
x[row[1]].append(row[0])
y[row[1]].append(row[2])
subflow_1, = plt.plot(x[0], y[0], 'ro')
subflow_2, = plt.plot(x[1], y[1], 'bo')
plt.legend([subflow_1, subflow_2], ['client side subflow 1', 'client side subflow 2'], loc='upper left')
plt.title('Client Side Time-Seqence number')
plt.xlabel('Time / s', fontsize = 14, color = 'black')
plt.ylabel('Seqence number', fontsize = 14, color = 'black')
def analyze_server_end_point(file_path):
record = []
# '/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_client'
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
next(spamreader)
for row in spamreader:
if int(row[1]) == 1: # not send record
timestamp = int(row[0])/10e8
subflowId = int(row[3])
seqnum = int(row[4])
record.append([timestamp, subflowId, seqnum])
record.sort(key=lambda ele:ele[0])
x, y = [[],[]], [[],[]]
for row in record:
# subflow id is from 0 to n-1
x[row[1]].append(row[0])
y[row[1]].append(row[2])
subflow_1, = plt.plot(x[0], y[0], 'ro')
subflow_2, = plt.plot(x[1], y[1], 'bo')
plt.legend([subflow_1, subflow_2], ['server side subflow 1', 'server side subflow 2'], loc='upper left')
plt.title('Server Side Time-Seqence number')
plt.xlabel('Time / s', fontsize = 14, color = 'black')
plt.ylabel('Seqence number', fontsize = 14, color = 'black')
def analyze_flow(file_path):
record = []
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
next(spamreader)
for row in spamreader:
timestamp = int(row[0])/10e8
flowId = int(row[1])
TxPacket = int(row[4])
RxPacket = int(row[6])
delaySum = float(row[8][1:-2])/10e8
lostPackets = int(row[10])
record.append([timestamp, flowId, TxPacket, RxPacket, delaySum, lostPackets])
record.sort(key=lambda ele:ele[0])
x = [[],[],[],[],[]]
y = [[],[],[],[],[]]
for row in record:
# flow id is from 1 to N,
x[row[1]].append(row[0]) # append time stamp for flow with id row[1]
y[row[1]].append(row[2]) # append TxPacket num for flow with id row[1]
s_c_subflow_1, = plt.plot(x[1], y[1], 'r-', linewidth=2.0) # s->c 1
c_s_subflow_1, = plt.plot(x[2], y[2], 'r-.', linewidth=2.0) # c->s 1
s_c_subflow_2, = plt.plot(x[3], y[3], 'b-', linewidth=2.0) # s->c 2
c_s_subflow_2, = plt.plot(x[4], y[4], 'b-.', linewidth=2.0) # c->s 2
plt.legend([s_c_subflow_1, c_s_subflow_1, s_c_subflow_2, c_s_subflow_2],
['server to client packet number over subflow 1', 'client to server packet number over subflow 1',
'server to client packet number over subflow 2', 'client to server packet number over subflow 2'], loc='upper left')
plt.title('Time-TxPacket')
plt.xlabel('Time / s', fontsize = 14, color = 'black')
plt.ylabel('Packet number', fontsize = 14, color = 'black')
def analyze_reward(file_path):
record = []
# '/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_client'
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
next(spamreader)
for row in spamreader:
timestamp = int(row[0])/10e9
reward = int(row[1])
record.append([timestamp, reward])
record.sort(key=lambda ele:ele[0])
x, y = [], []
for pair in record:
x.append(pair[0])
y.append(pair[1])
reward_plt, = plt.plot(x, y, 'k-')
plt.legend([reward_plt], ['reward'], loc='best')
plt.title('Time-Reward')
plt.xlabel('Time / s', fontsize = 14, color = 'black')
plt.ylabel('Reward', fontsize = 14, color = 'black')
if __name__ == '__main__':
# plt.subplot(4,1,1)
# analyze_application('/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_server')
# plt.subplot(4,1,2)
# analyze_client_end_node('/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_client')
# plt.subplot(4,1,3)
# analyze_server_end_point('/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_server')
# plt.subplot(4,1,4)
# analyze_flow('/home/hong/workspace/mptcp/ns3/mptcp_output/mptcp_server_cWnd')
# plt.show()
batch_num = int(sys.argv[1])
plt.subplot(4,1,1)
analyze_application('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_mptcp_server')
# analyze_application('/home/hong/workspace/mptcp/ns3/rl_training_data_wrong/' + str(batch_num) + '_mptcp_server')
# analyze_flow('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_mptcp_server_cWnd')
# analyze_reward('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_calculate_reward')
plt.subplot(4,1,2)
analyze_client_end_node('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_mptcp_client')
plt.subplot(4,1,3)
analyze_server_end_point('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_mptcp_server')
plt.subplot(4,1,4)
analyze_flow('/home/hong/workspace/mptcp/ns3/rl_training_data/' + str(batch_num) + '_mptcp_server_cWnd')
plt.show()
| gpl-2.0 |
endolith/scipy | doc/source/tutorial/stats/plots/qmc_plot_conv_mc_sobol.py | 12 | 2386 | """Integration convergence comparison: MC vs Sobol'.
The function is a synthetic example specifically designed
to verify the correctness of the implementation [2]_.
References
----------
.. [1] I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
1967.
.. [2] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
2020.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import qmc
n_conv = 99
ns_gen = 2 ** np.arange(4, 13) # 13
def art_2(sample):
# dim 5, true value 5/3 + 5*(5 - 1)/4
return np.sum(sample, axis=1) ** 2
functions = namedtuple('functions', ['name', 'func', 'dim', 'ref'])
case = functions('Art 2', art_2, 5, 5 / 3 + 5 * (5 - 1) / 4)
def conv_method(sampler, func, n_samples, n_conv, ref):
samples = [sampler(n_samples) for _ in range(n_conv)]
samples = np.array(samples)
evals = [np.sum(func(sample)) / n_samples for sample in samples]
squared_errors = (ref - np.array(evals)) ** 2
rmse = (np.sum(squared_errors) / n_conv) ** 0.5
return rmse
# Analysis
sample_mc_rmse = []
sample_sobol_rmse = []
rng = np.random.default_rng()
for ns in ns_gen:
# Monte Carlo
sampler_mc = lambda x: rng.random((x, case.dim))
conv_res = conv_method(sampler_mc, case.func, ns, n_conv, case.ref)
sample_mc_rmse.append(conv_res)
# Sobol'
engine = qmc.Sobol(d=case.dim, scramble=False)
conv_res = conv_method(engine.random, case.func, ns, 1, case.ref)
sample_sobol_rmse.append(conv_res)
sample_mc_rmse = np.array(sample_mc_rmse)
sample_sobol_rmse = np.array(sample_sobol_rmse)
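# The guide lines below use the textbook rates: plain Monte Carlo error decays like n^(-1/2), while an unscrambled Sobol' sequence achieves roughly n^(-1) on a smooth integrand such as this one, hence the reference exponents -1/2 and -2/2.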
# Plot
fig, ax = plt.subplots(figsize=(4, 4))
ax.set_aspect('equal')
# MC
ratio = sample_mc_rmse[0] / ns_gen[0] ** (-1 / 2)
ax.plot(ns_gen, ns_gen ** (-1 / 2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_mc_rmse, label="MC")
# Sobol'
ratio = sample_sobol_rmse[0] / ns_gen[0] ** (-2/2)
ax.plot(ns_gen, ns_gen ** (-2/2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_sobol_rmse, label="Sobol' unscrambled")
ax.set_xlabel(r'$N_s$')
ax.set_xscale('log')
ax.set_xticks(ns_gen)
ax.set_xticklabels([fr'$2^{{{ns}}}$' for ns in np.arange(4, 13)])
ax.set_ylabel(r'$\log (\epsilon)$')
ax.set_yscale('log')
ax.legend(loc='upper right')
fig.tight_layout()
plt.show()
| bsd-3-clause |
bospetersen/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py | 1 | 6138 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def weights_var_imp(ip,port):
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy"],
min_rows=5,
ntrees=5,
max_depth=2)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy"],
training_frame=data2,
min_rows=5*min_rows_scale,
weights_column="weights",
ntrees=5,
max_depth=2)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy_20mpg"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=2)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["cylinders"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
print "Varimp (regresson) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi2, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi2, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi2, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
if __name__ == "__main__":
tests.run_test(sys.argv, weights_var_imp)
| apache-2.0 |
nok/sklearn-porter | tests/estimator/classifier/KNeighborsClassifier/KNeighborsClassifierJavaTest.py | 1 | 1130 | # -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from sklearn.neighbors import KNeighborsClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.estimator.classifier.ExportedData import ExportedData
from tests.language.Java import Java
class KNeighborsClassifierJavaTest(Java, Classifier, ExportedData, TestCase):
def setUp(self):
super(KNeighborsClassifierJavaTest, self).setUp()
self.estimator = KNeighborsClassifier(n_neighbors=3)
def tearDown(self):
super(KNeighborsClassifierJavaTest, self).tearDown()
@unittest.skip('The generated code would be too large.')
def test_existing_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_existing_features__digits_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__digits_data__default(self):
pass
| mit |
mugizico/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, which
# [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
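# (2**31 - 1 and -1 act as "unlimited" sentinels for the tree builders below)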
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
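# (a node with fewer than 2 * min_samples_leaf samples could never produce two children that both satisfy the per-leaf minimum)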
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
caspar/PhysicsLab | 17_PhotoelecticEffect/planck.py | 1 | 1747 | # Lab 0
# Linear Least Squares Fit
# Author Caspar Lant
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# load csv file
DATA = "planck.csv";
frequency, voltage = np.loadtxt(DATA, skiprows=1 , unpack=True, delimiter=',');
# plot temperature vs. pressure + error bars
plt.ylabel("Voltage (V)");
plt.xlabel("Frequency ($10^{14}$ Hz)");
plt.title("Voltage vs. Frequency");
plt.errorbar(frequency, voltage, yerr=0.1, linestyle = '', mec='r', ms=5 );
dv = 0.1
# linear least squares fit line
def least_squares_fit (x, y):
xavg = x.mean()
slope = ( y * ( x - xavg)).sum() / (x*(x-xavg)).sum()
intercept = y.mean()-slope*xavg
return slope, intercept
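# These are the standard closed-form least-squares estimates for y = m*x + b: m = sum(y_i*(x_i - xbar)) / sum(x_i*(x_i - xbar)) and b = ybar - m*xbar.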
slope, intercept = least_squares_fit(frequency, voltage);
# create arrays to plot
y1 = slope * 7 + intercept; # y1 = m(x1) + b
y2 = slope * 0 + intercept; # y2 = m(x2) + b
x_range = [0, 7]; # array of x values
y_range = [y2, y1]; # array of y values
PLANCK = slope* 1.60217662
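# Assuming frequency is in units of 10^14 Hz and voltage in volts, multiplying the slope by 1.60217662 (the electron charge in units of 10^-19 C) gives h in units of 10^-33 J*s; the plot annotation below appears to quote the corresponding value, 6.14e-34 J*s.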
print("plancks constant:", PLANCK)
print("or", 1/PLANCK)
# show the graph
plt.plot(x_range, y_range, color="blue", linestyle = '-', label="Actual");
slope = 0.413566766
y1 = slope * 7 + intercept; # y1 = m(x1) + b
y2 = slope * 0 + intercept; # y2 = m(x2) + b
x_range = [0, 7]; # array of x values
y_range = [y2, y1]; # array of y values
PLANCK = slope * 1.60217662
# print("plancks constant:", PLANCK)
# print("or", 1/PLANCK)
# show the graph
plt.plot(x_range, y_range, color="grey",linestyle = ':', label="Expected");
plt.legend(loc='best')
plt.annotate("Slope = $6.14 * 10^{-34}$", xy=(2.27, -0.32), xytext=(2.5, -.7), arrowprops=dict(arrowstyle="->"))
# plt.legend(["slope = 1"])
plt.show();
| mit |
HackerPack/thePlaneteers | spark/twitterStream.py | 2 | 3507 | from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from geopy.geocoders import Nominatim
import operator
import numpy as np
import matplotlib.pyplot as plt
import nltk
import requests
import json
import commands
from HTMLParser import HTMLParser
def main():
conf = SparkConf().setMaster("local[2]").setAppName("Streamer")
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 10) # Create a streaming context with batch interval of 10 sec
ssc.checkpoint("checkpoint")
geolocator = Nominatim()
stream(ssc,geolocator,100)
def analyse(tweet,geolocator):
OriginalTweet,Location,TimeOfTweet,Latitude,Longitude = tweet.split('---')
TaskType = ""
Taken = "0"
Finished = "0"
Priority = "0"
Disaster = ""
tweetDict = {}
if '#' in OriginalTweet:
start_index = OriginalTweet.index('#') + 1
Disaster = OriginalTweet[start_index:OriginalTweet.index(' ',start_index)]
if 'available' in OriginalTweet:
TaskType = 'GOODS'
elif 'need' in OriginalTweet:
TaskType = 'GOODS'
elif 'missing' in OriginalTweet:
TaskType = 'MISSING'
Priority = '5'
elif 'SOS' in OriginalTweet:
TaskType = 'SOS'
else:
TaskType = 'UNKNOWN'
if 'ambulance' in OriginalTweet:
TaskType = 'AMBULANCE'
Priority = '10'
if 'emergency' in OriginalTweet:
Priority = '10'
if 'urgent' in OriginalTweet:
Priority = '10'
tweetDict['TaskType'] = TaskType
tweetDict['Disaster'] = Disaster
tweetDict['Taken'] = Taken
tweetDict['Finished'] = Finished
tweetDict['OriginalTweet'] = OriginalTweet
tweetDict['Location'] = Location
tweetDict['TimeOfTweet'] = TimeOfTweet
tweetDict['Priority'] = Priority
tweetDict['Latitude'] = float(Latitude)
tweetDict['Longitude'] = float(Longitude)
jsons = json.dumps(tweetDict)
#print jsons
#requests.post('http://hoyadisastermanagement.firebaseio.com/tasks2',json.dumps(tweetDict))
command = "curl -X POST -d '" + str(jsons) + "' 'https://hoyadisastermanagement.firebaseio.com/tasks.json'"
run_command = commands.getstatusoutput(command)
#print run_command
return 1
def stream(ssc,geolocator,duration):
kstream = KafkaUtils.createDirectStream(
ssc, topics = ['twitterstream'], kafkaParams = {"metadata.broker.list": 'localhost:9092'})
tweets = kstream.map(lambda x: x[1].encode("ascii","ignore"))
# Each element of tweets will be the text of a tweet.
# You need to find the count of all the positive and negative words in these tweets.
# Keep track of a running total counts and print this at every time step (use the pprint function).
# YOUR CODE HERE
tweets.pprint()
tweetDstream = tweets.map(lambda line: analyse(line,geolocator))
tweetDstream.pprint(2)
#tweetDstream.pprint(2)
# Let the counts variable hold the word counts for all time steps
# You will need to use the foreachRDD function.
# For our implementation, counts looked like:
# [[("positive", 100), ("negative", 50)], [("positive", 80), ("negative", 60)], ...]
counts = []
# tweetDstream.foreachRDD(lambda rdd: rdd.collect())
ssc.start() # Start the computation
ssc.awaitTerminationOrTimeout(duration)
ssc.stop(stopGraceFully=True)
return counts
if __name__=="__main__":
main()
| apache-2.0 |
caisq/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops.py | 6 | 176288 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
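# A minimal usage sketch of the value/update_op pattern shared by the streaming
# metrics in this module (assumes TF 1.x graph mode; this helper is only an
# illustration, not part of the public API, and the numbers are made up).
def _example_streaming_mean_usage():
  import tensorflow as tf

  values = tf.placeholder(tf.float32, shape=[None])
  mean, update_op = streaming_mean(values)
  with tf.Session() as sess:
    # Streaming metrics keep their state in local variables.
    sess.run(tf.local_variables_initializer())
    for batch in ([1.0, 2.0], [3.0, 5.0]):
      sess.run(update_op, feed_dict={values: batch})
    # Mean over everything seen so far: (1 + 2 + 3 + 5) / 4 = 2.75.
    print(sess.run(mean))
  # The same pattern applies to streaming_accuracy, streaming_precision, etc.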
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
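# For example (made-up counts): with accumulated false_positives = 5 and
# true_negatives = 15, the returned `fpr` evaluates to 5 / (5 + 15) = 0.25.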
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
  `true_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
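# Shape sketch for the thresholding above (hypothetical values): with
# predictions [0.2, 0.7] and thresholds [0.0, 0.5, 1.0], `pred_is_pos` is the
# [num_thresholds, num_predictions] boolean matrix
#   [[True,  True ],    # both predictions exceed 0.0
#    [False, True ],    # only 0.7 exceeds 0.5
#    [False, False]],   # nothing exceeds 1.0
# and each confusion count is then a row-wise (per-threshold) weighted sum.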
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
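# Reading the output (sketch): for curve='ROC', points[:, 0] is the false
# positive rate and points[:, 1] is recall (TPR) at each threshold; for
# curve='PR', points[:, 0] is recall and points[:, 1] is precision. Evaluating
# `points` after running `update_op` over the data yields coordinates that can
# be plotted directly to visualise the curve.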
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of the negatives, for computing the FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
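# The Riemann sum in `continue_computing_dynamic_auc` is the trapezoid rule:
#   AUC ~= sum_i |x_{i+1} - x_i| * (y_i + y_{i+1}) / 2.
# Worked example with made-up points x = [0.0, 0.5, 1.0], y = [0.0, 0.8, 1.0]:
#   0.5 * (0.0 + 0.8) / 2 + 0.5 * (0.8 + 1.0) / 2 = 0.2 + 0.45 = 0.65.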
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1, castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
weights: A 'Tensor' of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
concept of placement values for each labeled group, as presented by Delong and
Delong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
# If all the labels are the same or if number of observations are too few,
# AUC isn't well-defined
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
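  # Note: for alpha = 0.95 this quantile is taken at 0.025, so z_value is
  # negative (about -1.96). That is why the lower bound below is computed as
  # estimate + z_value * std_err and the upper bound as estimate - z_value *
  # std_err, which matches the conventional estimate -/+ 1.96 * SE interval.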
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  ## If estimate is 1 or 0, no variance is present, so the CI collapses to the
  ## point estimate.
  ## N.B. This can be misleading, since the number of observations may simply
  ## be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1, castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
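
# --- Illustrative usage sketch (not part of the original API) ---
# A minimal, hedged example of driving `auc_with_confidence_intervals` in a
# TF1-style graph/session loop. The placeholder names and toy batches below
# are assumptions made purely for illustration.
def _example_auc_with_confidence_intervals_usage():
  import numpy as np  # Local import keeps the sketch self-contained.
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  labels_ph = tf.placeholder(tf.int64, shape=[None])
  preds_ph = tf.placeholder(tf.float32, shape=[None])
  auc_data, update_op = auc_with_confidence_intervals(labels_ph, preds_ph)
  with tf.Session() as sess:
    # The metric accumulators are local variables.
    sess.run(tf.local_variables_initializer())
    # Stream two evaluation batches; labels and predictions are concatenated
    # internally and the AUC is recomputed over everything seen so far.
    for labels_np, preds_np in [
        (np.array([0, 1, 1, 0]), np.array([0.1, 0.8, 0.6, 0.4])),
        (np.array([1, 0, 1, 1]), np.array([0.9, 0.2, 0.7, 0.55])),
    ]:
      sess.run(update_op, feed_dict={labels_ph: labels_np, preds_ph: preds_np})
    # `auc_data` is an AucData named tuple: (auc, lower, upper).
    print(sess.run(auc_data))
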
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
  The computed values are true positives, false negatives, true negatives,
  false positives, precision, and recall. This function returns a data
  structure (a named tuple) that contains ops for all of these values.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
    name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
      precision, recall, thresholds. Types are the same as that of
      `predictions`.
update_op: An op that accumulates values.
Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# It's important we aggregate using float64 since we're accumulating a lot
# of 1.0's for the true/false labels, and accumulating to float32 will
# be quite inaccurate even with just a modest amount of values (~20M).
      # We use float64 instead of integer primarily since the GPU scatter
      # kernel only supports floats.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
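
# --- Illustrative usage sketch (not part of the original API) ---
# A small, hedged example of reading the PrecisionRecallData tuple returned by
# `precision_recall_at_equal_thresholds`. The boolean labels, scores, and the
# choice of `num_thresholds` below are assumptions for illustration only.
def _example_precision_recall_at_equal_thresholds_usage():
  import numpy as np  # Local import keeps the sketch self-contained.
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  labels = tf.constant([True, False, True, True, False])
  predictions = tf.constant([0.9, 0.3, 0.55, 0.2, 0.1], dtype=tf.float32)
  result, update_op = precision_recall_at_equal_thresholds(
      labels, predictions, num_thresholds=11)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)  # Scatter this batch into the tp/fp buckets.
    # Each field is a vector of length `num_thresholds` (thresholds 0.0..1.0).
    precision, recall, thresholds = sess.run(
        [result.precision, result.recall, result.thresholds])
    print(np.round(thresholds, 2), np.round(precision, 3), np.round(recall, 3))
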
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
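
# --- Illustrative usage sketch (not part of the original API) ---
# A hedged example of the two wrappers above, which forward to `tf.metrics`
# and follow the usual (value, update_op) contract. The toy labels, scores,
# and target values are assumptions for illustration only.
def _example_specificity_sensitivity_usage():
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  labels = tf.constant([True, False, True, False, True, False])
  predictions = tf.constant([0.9, 0.6, 0.7, 0.3, 0.8, 0.1])
  spec, spec_update = streaming_specificity_at_sensitivity(
      predictions, labels, sensitivity=0.9)
  sens, sens_update = streaming_sensitivity_at_specificity(
      predictions, labels, specificity=0.9)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run([spec_update, sens_update])  # Accumulate the confusion counts.
    print(sess.run([spec, sens]))
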
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
'the order of the labels and predictions arguments are switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
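
# --- Illustrative usage sketch (not part of the original API) ---
# A hedged example of the thresholded precision/recall wrappers above. The
# threshold list and toy data are assumptions for illustration only; each
# returned value has one entry per threshold.
def _example_precision_recall_at_thresholds_usage():
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  labels = tf.constant([True, False, True, True])
  predictions = tf.constant([0.8, 0.6, 0.4, 0.2])
  thresholds = [0.25, 0.5, 0.75]
  prec, prec_update = streaming_precision_at_thresholds(
      predictions, labels, thresholds)
  rec, rec_update = streaming_recall_at_thresholds(
      predictions, labels, thresholds)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run([prec_update, rec_update])
    # Both outputs have shape [len(thresholds)].
    print(sess.run([prec, rec]))
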
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values
  of thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight
  of values in `predictions` at or below `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
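
# For reference, a few hedged examples of the names produced by `_at_k_name`
# (derived from the helper above; shown for illustration only):
#   _at_k_name('recall', k=5)                  -> 'recall_at_5'
#   _at_k_name('precision', class_id=3)        -> 'precision_at_k_class3'
#   _at_k_name('precision', k=10, class_id=3)  -> 'precision_at_10_class3'
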
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
If `class_id` is specified, we calculate recall by considering only the rows
in the batch for which `class_id` is in `labels`, and computing the
      fraction of them for which `class_id` is in the top `k` highest
      `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
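
# --- Illustrative usage sketch (not part of the original API) ---
# A hedged example of `streaming_sparse_recall_at_k` with dense int64 labels
# of shape [batch_size, num_labels] and logits of shape
# [batch_size, num_classes]. The toy logits/labels and k=2 are assumptions
# for illustration only.
def _example_streaming_sparse_recall_at_k_usage():
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  # Two examples, four classes; each example has one target label.
  logits = tf.constant([[0.1, 2.0, 0.3, 0.2],
                        [1.5, 0.2, 0.1, 1.0]])
  labels = tf.constant([[1], [3]], dtype=tf.int64)
  recall, update_op = streaming_sparse_recall_at_k(logits, labels, k=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(recall))  # Fraction of labels found in the top-2 logits.
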
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
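
# --- Illustrative usage sketch (not part of the original API) ---
# A hedged example of `streaming_sparse_precision_at_top_k`, where the top-k
# class indices are computed externally (e.g. with tf.nn.top_k) and fed in as
# `top_k_predictions`. The toy logits/labels below are assumptions only.
def _example_streaming_sparse_precision_at_top_k_usage():
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  logits = tf.constant([[0.1, 2.0, 0.3, 0.2],
                        [1.5, 0.2, 0.1, 1.0]])
  labels = tf.constant([[1], [2]], dtype=tf.int64)
  # Indices of the top-2 classes per example, cast to int64 to match `labels`.
  _, top_k_idx = tf.nn.top_k(logits, k=2)
  top_k_predictions = tf.cast(top_k_idx, tf.int64)
  precision, update_op = streaming_sparse_precision_at_top_k(
      top_k_predictions, labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(precision))  # True positives / all top-k predictions.
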
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
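
# --- Illustrative sketch (not part of the original API) ---
# A hedged NumPy rendering of the threshold selection performed by
# `_compute_recall_at_precision` above: pick the threshold whose precision is
# closest to the target, then read recall at that same index. The toy
# per-threshold counts below are assumptions for illustration only.
def _example_recall_at_precision_selection_sketch(target_precision=0.9):
  import numpy as np  # Local import keeps the sketch self-contained.
  tp = np.array([9., 7., 4.])  # True positives at each threshold.
  fp = np.array([6., 2., 0.])  # False positives at each threshold.
  fn = np.array([1., 3., 6.])  # False negatives at each threshold.
  precisions = tp / (tp + fp + _EPSILON)
  idx = np.argmin(np.abs(precisions - target_precision))
  return tp[idx] / (tp[idx] + fn[idx] + _EPSILON)
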
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes `recall` at `precision`.
  The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value')
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
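
# --- Illustrative usage sketch (not part of the original API) ---
# A hedged end-to-end example of `recall_at_precision`. The toy labels/scores
# and the precision target are assumptions for illustration only.
def _example_recall_at_precision_usage():
  import tensorflow as tf  # Assumes TF1.x graph/session execution.
  labels = tf.constant([True, False, True, True, False, True])
  predictions = tf.constant([0.9, 0.8, 0.7, 0.6, 0.3, 0.2])
  recall, update_op = recall_at_precision(labels, predictions, precision=0.75)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)  # Accumulate tp/fp/fn across all thresholds.
    # Recall at the threshold whose precision is closest to 0.75.
    print(sess.run(recall))
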
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon,
name)
precision_value = compute_precision_at_recall(
values['tp'], values['fp'], values['fn'], 'value')
update_op = compute_precision_at_recall(
update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
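
# --- Illustrative sketch (not part of the original API) ---
# A hedged NumPy rendering of the index arithmetic inside `precision_at_recall`:
# since recall is monotone non-increasing in the threshold, counting the
# thresholds whose recall still meets the target and subtracting one lands on
# the largest admissible threshold. The toy per-threshold counts below are
# assumptions for illustration only.
def _example_precision_at_recall_selection_sketch(target_recall=0.6):
  import numpy as np  # Local import keeps the sketch self-contained.
  kepsilon = 1e-7  # Mirrors the epsilon used above to avoid division by zero.
  tp = np.array([10., 8., 5., 2.])  # True positives, thresholds ascending.
  fp = np.array([7., 4., 1., 0.])   # False positives per threshold.
  fn = np.array([0., 2., 5., 8.])   # False negatives per threshold.
  recalls = tp / (tp + fn + kepsilon)
  admissible = (recalls >= target_recall).astype(np.int64)
  idx = int(np.sum(admissible)) - 1  # Largest threshold meeting the target.
  return (tp[idx] + kepsilon) / (tp[idx] + fp[idx] + kepsilon)
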
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on the formula. `weights`
  are applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
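# NOTE: Illustrative sketch, not part of the original module. The streaming
# metrics above and below all share the same (value, update_op) contract; the
# session, feed and batch names in this sketch are assumptions, not APIs
# defined here:
#
#   mae, update_mae = streaming_mean_absolute_error(predictions, labels)
#   sess.run(tf.local_variables_initializer())  # `total` and `count` are local
#   for feed_dict in batches:                   # hypothetical per-batch feeds
#     sess.run(update_mae, feed_dict=feed_dict) # accumulates total and count
#   final_mae = sess.run(mae)                   # total / count over all batches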
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None,
'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
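# NOTE: Illustrative worked example, not part of the original module. It checks
# the comoment merge formula used above on two hypothetical batches:
#   batch A = {(0, 0), (2, 2)} -> E[x_A] = E[y_A] = 1, C_A = 2, n_A = 2
#   batch B = {(4, 4), (6, 6)} -> E[x_B] = E[y_B] = 5, C_B = 2, n_B = 2
#   C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB
#        = 2 + 2 + (-4) * (-4) * 2 * 2 / 4 = 20
# which matches the comoment of the pooled data (mean 3, sum of products 20),
# giving an unbiased covariance of 20 / (4 - 1) = 20 / 3.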
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
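# NOTE: Illustrative sketch, not part of the original module. Continuing the
# hypothetical batches from the covariance example above (where y equals x
# exactly), the three streamed quantities are
# cov = var_predictions = var_labels = 20 / 3, so
# pearson_r = (20 / 3) / sqrt((20 / 3) * (20 / 3)) = 1.0, as expected for
# perfectly correlated inputs.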
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
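# NOTE: Illustrative worked example, not part of the original module. With
# inputs already unit-normalized along `dim` (see the TODO above about a
# 'normalized' flag), e.g. predictions = [1.0, 0.0] and labels = [0.6, 0.8]:
#   radial_diffs  = sum(predictions * labels) = 0.6  (the cosine similarity)
#   mean_distance = 1.0 - 0.6 = 0.4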
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
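# NOTE: Illustrative worked example, not part of the original module. For a
# hypothetical 2-class confusion matrix with rows as labels and columns as
# predictions, [[3, 1], [2, 4]]:
#   IOU(class 0) = 3 / (3 + 2 + 1) = 0.5
#   IOU(class 1) = 4 / (4 + 1 + 2) = 4 / 7
#   mean_iou     = (0.5 + 4 / 7) / 2, roughly 0.536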
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
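# NOTE: Illustrative worked example, not part of the original module. With the
# default growth_factor of 1.5, _next_array_size returns the smallest power of
# 1.5 (rounded up) that covers the requested capacity, e.g.:
#   required_size = 5 -> ceil(log(5) / log(1.5)) = 4 -> ceil(1.5 ** 4) = 6
#   required_size = 7 -> ceil(log(7) / log(1.5)) = 5 -> ceil(1.5 ** 5) = 8
# so repeated reallocations in streaming_concat grow geometrically, which keeps
# appends amortized constant time.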
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
      raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
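# NOTE: Illustrative sketch, not part of the original module; the tensors fed
# in are hypothetical. It just shows how aggregate_metrics flattens several
# (value, update_op) pairs into two parallel lists:
#
#   value_ops, update_ops = aggregate_metrics(
#       streaming_mean_absolute_error(predictions, labels),
#       streaming_root_mean_squared_error(predictions, labels))
#   # value_ops  == [mae_value, rmse_value]
#   # update_ops == [mae_update, rmse_update]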
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(
predictions, labels, weights),
'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(
predictions, labels, labels, weights),
'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
_, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_op = state_ops.assign_add(count_, num_values)
if metrics_collections:
ops.add_to_collections(metrics_collections, count_)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return count_, update_op
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
  doesn't support a weighted confusion matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
metrics_impl._safe_div( # pylint: disable=protected-access
pe_row * pe_col, total, None))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
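# NOTE: Illustrative worked example, not part of the original module. For a
# hypothetical accumulated confusion matrix [[20, 5], [10, 15]] (rows are
# labels, columns are predictions, N = 50):
#   po     = diagonal            = [20, 15], sum(po) = 35
#   pe_row = column totals       = [30, 20]
#   pe_col = row totals          = [25, 25]
#   pe     = pe_row * pe_col / N = [15, 10], sum(pe) = 25
#   kappa  = (35 - 25) / (50 - 25) = 0.4
# i.e. 70% observed agreement measured against 50% chance agreement.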
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
| apache-2.0 |
jcrudy/py-earth | examples/plot_output_weight.py | 3 | 1919 | """
=================================================================
Demonstrating a use of weights in outputs with two sine functions
=================================================================
Each row in the grid is a run of an earth model.
Each column is an output.
In each run, different weights are given to
the outputs.
"""
import numpy as np
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
np.random.seed(2)
m = 10000
n = 10
X = 80 * np.random.uniform(size=(m, n)) - 40
y1 = 120 * np.abs(np.sin((X[:, 6]) / 6) - 1.0) + 15 * np.random.normal(size=m)
y2 = 120 * np.abs(np.sin((X[:, 5]) / 6) - 1.0) + 15 * np.random.normal(size=m)
y1 = (y1 - y1.mean()) / y1.std()
y2 = (y2 - y2.mean()) / y2.std()
y_mix = np.concatenate((y1[:, np.newaxis], y2[:, np.newaxis]), axis=1)
alphas = [0.9, 0.8, 0.6, 0.4, 0.2, 0.1]
n_plots = len(alphas)
k = 1
fig = plt.figure(figsize=(10, 15))
for i, alpha in enumerate(alphas):
# Fit an Earth model
model = Earth(max_degree=5,
minspan_alpha=.05,
endspan_alpha=.05,
max_terms=10,
check_every=1,
thresh=0.)
output_weight = np.array([alpha, 1 - alpha])
model.fit(X, y_mix, output_weight=output_weight)
print(model.summary())
# Plot the model
y_hat = model.predict(X)
mse = ((y_hat - y_mix) ** 2).mean(axis=0)
ax = plt.subplot(n_plots, 2, k)
ax.set_ylabel("Run {0}".format(i + 1), rotation=0, labelpad=20)
plt.plot(X[:, 6], y_mix[:, 0], 'r.')
plt.plot(X[:, 6], model.predict(X)[:, 0], 'b.')
plt.title("MSE: {0:.3f}, Weight : {1:.1f}".format(mse[0], alpha))
plt.subplot(n_plots, 2, k + 1)
plt.plot(X[:, 5], y_mix[:, 1], 'r.')
plt.plot(X[:, 5], model.predict(X)[:, 1], 'b.')
plt.title("MSE: {0:.3f}, Weight : {1:.1f}".format(mse[1], 1 - alpha))
k += 2
plt.tight_layout()
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/io/spss.py | 1 | 1181 | from pathlib import Path
from typing import Optional, Sequence, Union
from pandas.compat._optional import import_optional_dependency
from pandas.api.types import is_list_like
from pandas.core.api import DataFrame
def read_spss(
path: Union[str, Path],
usecols: Optional[Sequence[str]] = None,
convert_categoricals: bool = True,
) -> DataFrame:
"""
Load an SPSS file from the file path, returning a DataFrame.
    .. versionadded:: 0.25.0
Parameters
----------
path : string or Path
File path
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
convert_categoricals : bool, default is True
Convert categorical columns into pd.Categorical.
Returns
-------
DataFrame
"""
pyreadstat = import_optional_dependency("pyreadstat")
if usecols is not None:
if not is_list_like(usecols):
raise TypeError("usecols must be list-like.")
else:
usecols = list(usecols) # pyreadstat requires a list
df, _ = pyreadstat.read_sav(
path, usecols=usecols, apply_value_formats=convert_categoricals
)
return df
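# Note: illustrative usage sketch only; the file and column names below are
# assumptions, not part of pandas:
#
#   df = read_spss("survey.sav", usecols=["age", "gender"],
#                  convert_categoricals=True)
#   # columns that carry SPSS value labels come back as pandas Categorical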
| apache-2.0 |
arahuja/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e303.py | 2 | 5985 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=False,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-02,
learning_rate_changes_by_iteration={
500: 5e-03,
4000: 1e-03,
6000: 5e-06,
7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
)
def callback(net, epoch):
net.source.reshape_target_to_2D = True
net.plotter = MDNPlotter(net)
net.generate_validation_data_and_set_shapes()
net.loss_function = lambda x, t: mdn_nll(x, t).mean()
net.learning_rate = 1e-05
def exp_a(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['reshape_target_to_2D'] = False
source = RealApplianceSource(**source_dict_copy)
source.reshape_target_to_2D = False
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'W': Normal(std=1/sqrt(N)),
'num_units': source.n_outputs,
'nonlinearity': None
}
]
net_dict_copy['layer_changes'] = {
5001: {
'remove_from': -2,
'callback': callback,
'new_layers': [
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
}
}
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=100000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/backends/backend_gtk3agg.py | 6 | 3144 | import cairo
import numpy as np
import sys
import warnings
import backend_agg
import backend_gtk3
from matplotlib.figure import Figure
from matplotlib import transforms
if sys.version_info[0] >= 3:
warnings.warn("The Gtk3Agg backend is not known to work on Python 3.x.")
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
backend_agg.FigureCanvasAgg):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
self._bbox_queue = []
def _renderer_init(self):
pass
def _render_figure(self, width, height):
backend_agg.FigureCanvasAgg.draw(self)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
if not len(self._bbox_queue):
if self._need_redraw:
self._render_figure(w, h)
bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
else:
return
else:
bbox_queue = self._bbox_queue
for bbox in bbox_queue:
area = self.copy_from_bbox(bbox)
buf = np.fromstring(area.to_string_argb(), dtype='uint8')
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
image = cairo.ImageSurface.create_for_data(
buf, cairo.FORMAT_ARGB32, width, height)
ctx.set_source_surface(image, x, y)
ctx.paint()
if len(self._bbox_queue):
self._bbox_queue = []
return False
def blit(self, bbox=None):
# If bbox is None, blit the entire canvas to gtk. Otherwise
# blit only the area defined by the bbox.
if bbox is None:
bbox = self.figure.bbox
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
self._bbox_queue.append(bbox)
self.queue_draw_area(x, y, width, height)
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(backend_agg.FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Agg(figure)
manager = FigureManagerGTK3Agg(canvas, num)
return manager
FigureManager = FigureManagerGTK3Agg
show = backend_gtk3.show
| mit |
PythonCharmers/orange3 | Orange/projection/manifold.py | 7 | 2347 | import sklearn.manifold as skl_manifold
from Orange.distance import SklDistance, SpearmanDistance, PearsonDistance
from Orange.projection import SklProjector
__all__ = ["MDS", "Isomap", "LocallyLinearEmbedding"]
class MDS(SklProjector):
__wraps__ = skl_manifold.MDS
name = 'mds'
def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300,
eps=0.001, n_jobs=1, random_state=None,
dissimilarity='euclidean',
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
self._metric = dissimilarity
def __call__(self, data):
distances = SklDistance, SpearmanDistance, PearsonDistance
if isinstance(self._metric, distances):
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
dist_matrix = self._metric(X)
self.params['dissimilarity'] = 'precomputed'
clf = self.fit(dist_matrix, Y=Y)
        elif self._metric == 'precomputed':
dist_matrix, Y, domain = data, None, None
clf = self.fit(dist_matrix, Y=Y)
else:
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
clf = self.fit(X, Y=Y)
clf.domain = domain
return clf
def fit(self, X, init=None, Y=None):
proj = self.__wraps__(**self.params)
return proj.fit(X, init=init, y=Y)
class Isomap(SklProjector):
__wraps__ = skl_manifold.Isomap
name = 'isomap'
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
max_iter=None, path_method='auto',
neighbors_algorithm='auto', preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class LocallyLinearEmbedding(SklProjector):
__wraps__ = skl_manifold.LocallyLinearEmbedding
name = 'lle'
def __init__(self, n_neighbors=5, n_components=2, reg=0.001,
eigen_solver='auto', tol=1e-06 , max_iter=100,
method='standard', hessian_tol=0.0001,
modified_tol=1e-12, neighbors_algorithm='auto',
random_state=None, preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
| gpl-3.0 |
hainm/scipy | scipy/special/add_newdocs.py | 24 | 70839 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtrib(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
Finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
Returns the modified Struve function Lv(x) of order v at x; x must
be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the gamma function
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x. The argument x
    must be positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| bsd-3-clause |
dgraham999/Partner_Forecast | predict_rfr.py | 1 | 3012 | import os as os
import pandas as pd
import numpy as np
import pickle as pkl
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import train_test_split
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
import math as ma
from sklearn.ensemble import RandomForestRegressor as RFR
#set seed for reproducibility
seed = 73
np.random.seed(73)
#fetch data
#with open('Partner_Select_Qtr.pkl', 'rb') as f:
# data = pkl.load(f)
with open('Part_Qtr_Rev_Data.pkl', 'rb') as f:
data = pkl.load(f)
#drop columns for classifying data and date variable
cols = data.columns[1:9]
cols = cols.append(data.columns[-1:])
df = data.drop(cols,axis=1)
#construct column list for feature importance
rfrcols = df.columns[1:-1]
#create id list in df.ID
idx = set([int(i) for i in df.ID])
#iterate through partner id constructing dataframe of feature importances
scores = {}
feature_importance = {}
columns = ['ID']
columns.extend(rfrcols)
#iterate through selected id's
for id in idx:
dr = df[df['ID'] == id]
rev = dr.pop('Rev').values.reshape(-1,1)
dr = dr.drop(['ID'],axis=1)
#scale data
scaler = StandardScaler()
X = scaler.fit_transform(dr)
y = scaler.fit_transform(rev).ravel()
rfr = RFR(n_estimators=25,random_state=seed,oob_score=True,n_jobs=-1)
rfr.fit(X,y)
fi = rfr.feature_importances_
rank = np.argsort(fi)[::-1]
#fs = list(fi.sort())[::-1]
feature_weight = [fi[r] for r in rank]
feature_list=[rfrcols[r] for r in rank]
feature_rank = range(1,len(rank)+1)
features = list(zip(feature_rank,feature_list,feature_weight))
feature_importance[id] = features
scores[id] = rfr.score(X,y)
#pickle and save feature importances by ID
filestr = 'Feature_Importance_ID.pkl'
with open(filestr,'wb') as f:
pkl.dump(feature_importance,f,-1)
#pickle and save scores by ID dictionary
filestr = 'RFR_Scores_ID.pkl'
with open(filestr,'wb') as f:
pkl.dump(scores,f,-1)
#compute RFR on entire sample set with no ID selection
cols = data.columns[0:9]
cols = cols.append(data.columns[-1:])
df = data.drop(cols,axis=1)
#feature names
cols = df.columns
#form y target
rev = df.pop('Rev').values.reshape(-1,1)
#scale data
scaler = StandardScaler()
X = scaler.fit_transform(df)
y = scaler.fit_transform(rev).ravel()
#build regressor model
rfr = RFR(n_estimators=25,random_state=seed,oob_score=True,n_jobs=-1)
rfr.fit(X,y)
#set feature importances and rank
fi = rfr.feature_importances_
rank = np.argsort(fi)[::-1]
#create list of feature importance info
feature_rank = range(1,len(rank)+1)
feature_weight = [fi[r] for r in rank]
feature_list=[cols[r] for r in rank]
features = list(zip(feature_rank,feature_list,feature_weight))
score =rfr.score(X,y)
#save results
filestr = 'Feature_Importance.pkl'
with open(filestr,'wb') as f:
pkl.dump(features,f,-1)
#pickle and save scores by ID dictionary
filestr = 'RFR_Score.pkl'
with open(filestr,'wb') as f:
pkl.dump(score,f,-1)
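# A minimal, hedged sketch of reading the pickled outputs back: each partner ID
# maps to a list of (rank, feature_name, importance) tuples, and the per-ID
# score dictionary holds the R^2 of the fitted forest.  The file names follow
# the strings used above; the helper name and slicing are illustrative only.
def load_per_id_results():
    with open('Feature_Importance_ID.pkl', 'rb') as f:
        fi_by_id = pkl.load(f)
    with open('RFR_Scores_ID.pkl', 'rb') as f:
        scores_by_id = pkl.load(f)
    some_id = sorted(fi_by_id)[0]
    return fi_by_id[some_id][:3], scores_by_id[some_id]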
| gpl-3.0 |
scw/geopandas | tests/test_sindex.py | 7 | 4619 | import shutil
import tempfile
import numpy as np
from numpy.testing import assert_array_equal
from pandas import Series, read_csv
from shapely.geometry import (Polygon, Point, LineString,
MultiPoint, MultiLineString, MultiPolygon)
from shapely.geometry.base import BaseGeometry
from geopandas import GeoSeries, GeoDataFrame, base, read_file
from .util import unittest, geom_equals, geom_almost_equals
@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
class TestSeriesSindex(unittest.TestCase):
def test_empty_index(self):
self.assert_(GeoSeries().sindex is None)
def test_point(self):
s = GeoSeries([Point(0, 0)])
self.assertEqual(s.sindex.size, 1)
hits = s.sindex.intersection((-1, -1, 1, 1))
self.assertEqual(len(list(hits)), 1)
hits = s.sindex.intersection((-2, -2, -1, -1))
self.assertEqual(len(list(hits)), 0)
def test_empty_point(self):
s = GeoSeries([Point()])
self.assert_(GeoSeries().sindex is None)
def test_polygons(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
self.assertEqual(s.sindex.size, 3)
def test_polygons_append(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
t = GeoSeries([t1, t2, sq], [3,4,5])
s = s.append(t)
self.assertEqual(len(s), 6)
self.assertEqual(s.sindex.size, 6)
def test_lazy_build(self):
s = GeoSeries([Point(0, 0)])
self.assert_(s._sindex is None)
self.assertEqual(s.sindex.size, 1)
self.assert_(s._sindex is not None)
@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
class TestFrameSindex(unittest.TestCase):
def setUp(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
self.df = GeoDataFrame(data, geometry='location')
def test_sindex(self):
self.df.crs = {'init': 'epsg:4326'}
self.assertEqual(self.df.sindex.size, 5)
hits = list(self.df.sindex.intersection((2.5, 2.5, 4, 4), objects=True))
self.assertEqual(len(hits), 2)
self.assertEqual(hits[0].object, 3)
def test_lazy_build(self):
self.assert_(self.df._sindex is None)
self.assertEqual(self.df.sindex.size, 5)
self.assert_(self.df._sindex is not None)
def test_sindex_rebuild_on_set_geometry(self):
# First build the sindex
self.assert_(self.df.sindex is not None)
self.df.set_geometry(
[Point(x, y) for x, y in zip(range(5, 10), range(5, 10))],
inplace=True)
self.assert_(self.df._sindex_valid == False)
@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
class TestJoinSindex(unittest.TestCase):
def setUp(self):
self.boros = read_file(
"/nybb_14a_av/nybb.shp",
vfs="zip://examples/nybb_14aav.zip")
def test_merge_geo(self):
# First check that we gets hits from the boros frame.
tree = self.boros.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[self.boros.ix[hit.object]['BoroName'] for hit in hits],
['Bronx', 'Queens'])
# Check that we only get the Bronx from this view.
first = self.boros[self.boros['BoroCode'] < 3]
tree = first.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[first.ix[hit.object]['BoroName'] for hit in hits],
['Bronx'])
# Check that we only get Queens from this view.
second = self.boros[self.boros['BoroCode'] >= 3]
tree = second.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[second.ix[hit.object]['BoroName'] for hit in hits],
['Queens'])
# Get both the Bronx and Queens again.
merged = first.merge(second, how='outer')
self.assertEqual(len(merged), 5)
self.assertEqual(merged.sindex.size, 5)
tree = merged.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[merged.ix[hit.object]['BoroName'] for hit in hits],
['Bronx', 'Queens'])
| bsd-3-clause |
abitofalchemy/hrg_nets | net_metrics.py | 1 | 46739 | __author__ = ['Salvador Aguinaga', 'Rodrigo Palacios', 'David Chaing', 'Tim Weninger']
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize':'small',
'figure.figsize': (1.6 * 10, 1.0 * 10),
'axes.labelsize': 'small',
'axes.titlesize': 'small',
'xtick.labelsize':'small',
'ytick.labelsize':'small'}
pylab.rcParams.update(params)
import matplotlib.gridspec as gridspec
import pandas as pd
import numpy as np
import random
import collections
from collections import Counter
from random import sample
import math
def draw_ugander_graphlet_plot(orig_g, mG, ergm=[], rmat=[]):
df = pd.DataFrame(mG)
width = .25
if len(ergm) > 0:
dfergm = pd.DataFrame(ergm)
width = .20
if len(rmat) > 0:
rmat = pd.DataFrame(rmat)
width = .20
N = 11
dforig = pd.DataFrame(orig_g)
means = (dforig.mean()['e0'], dforig.mean()['e1'], dforig.mean()['e2'], dforig.mean()['e2c'], dforig.mean()['tri'],
dforig.mean()['p3'], dforig.mean()['star'], dforig.mean()['tritail'], dforig.mean()['square'],
dforig.mean()['squarediag'], dforig.mean()['k4'])
sem = (dforig.sem()['e0'], dforig.sem()['e1'], dforig.sem()['e2'], dforig.sem()['e2c'], dforig.sem()['tri'],
dforig.sem()['p3'], dforig.sem()['star'], dforig.sem()['tritail'], dforig.sem()['square'],
dforig.sem()['squarediag'], dforig.sem()['k4'])
ind = np.arange(N)
fig, ax = plt.subplots()
print means
rects = ax.bar(ind + .02, means, width - .02, color='k', yerr=sem)
means = (df.mean()['e0'], df.mean()['e1'], df.mean()['e2'], df.mean()['e2c'], df.mean()['tri'], df.mean()['p3'],
df.mean()['star'], df.mean()['tritail'], df.mean()['square'], df.mean()['squarediag'], df.mean()['k4'])
sem = (
df.sem()['e0'], df.sem()['e1'], df.sem()['e2'], df.sem()['e2c'], df.sem()['tri'], df.sem()['p3'],
df.sem()['star'],
df.sem()['tritail'], df.sem()['square'], df.sem()['squarediag'], df.sem()['k4'])
rects = ax.bar(ind + width + .02, means, width - .02, color='b', yerr=sem)
print means
ax.set_yscale("log", nonposy='clip')
if len(ergm) > 0:
means = (
dfergm.mean()['e0'], dfergm.mean()['e1'], dfergm.mean()['e2'], dfergm.mean()['e2c'], dfergm.mean()['tri'],
dfergm.mean()['p3'], dfergm.mean()['star'], dfergm.mean()['tritail'], dfergm.mean()['square'],
dfergm.mean()['squarediag'], dfergm.mean()['k4'])
sem = (dfergm.sem()['e0'], dfergm.sem()['e1'], dfergm.sem()['e2'], dfergm.sem()['e2c'], dfergm.sem()['tri'],
dfergm.sem()['p3'], dfergm.sem()['star'], dfergm.sem()['tritail'], dfergm.sem()['square'],
dfergm.sem()['squarediag'], dfergm.sem()['k4'])
rects = ax.bar(ind + width + width + width + .02, means, width - .02, color='r', yerr=sem)
print means
if len(rmat) > 0:
means = (rmat.mean()['e0'], rmat.mean()['e1'], rmat.mean()['e2'], rmat.mean()['e2c'], rmat.mean()['tri'],
rmat.mean()['p3'], rmat.mean()['star'], rmat.mean()['tritail'], rmat.mean()['square'],
rmat.mean()['squarediag'], rmat.mean()['k4'])
print means
rects = ax.bar(ind + width + width + .02, means, width - .02, color='purple')
plt.ylim(ymin=0)
# fig = plt.gcf()
# fig.set_size_inches(5, 3, forward=True)
plt.show()
def hops(all_succs, start, level=0, debug=False):
if debug: print("level:", level)
succs = all_succs[start] if start in all_succs else []
if debug: print("succs:", succs)
lensuccs = len(succs)
if debug: print("lensuccs:", lensuccs)
if debug: print()
if not succs:
yield level, 0
else:
yield level, lensuccs
for succ in succs:
# print("succ:", succ)
for h in hops(all_succs, succ, level + 1):
yield h
def get_graph_hops(graph, num_samples):
c = Counter()
for i in range(0, num_samples):
node = sample(graph.nodes(), 1)[0]
b = nx.bfs_successors(graph, node)
for l, h in hops(b, node):
c[l] += h
hopper = Counter()
for l in c:
hopper[l] = float(c[l]) / float(num_samples)
return hopper
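# A minimal, hedged usage sketch for get_graph_hops: the returned Counter maps
# each BFS level to the average number of nodes reached at that level over the
# sampled start nodes.  Assumes the old NetworkX 1.x API used throughout this
# module; the toy graph and helper name are illustrative only.
def _example_get_graph_hops():
    import networkx as nx
    g = nx.path_graph(10)
    return get_graph_hops(g, 20)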
def bfs_eff_diam(G, NTestNodes, P):
if G.number_of_nodes() == 0:
return 0
EffDiam = -1
FullDiam = -1
AvgSPL = -1
DistToCntH = {}
NodeIdV = nx.nodes(G)
random.shuffle(NodeIdV)
for tries in range(0, min(NTestNodes, nx.number_of_nodes(G))):
NId = NodeIdV[tries]
b = nx.bfs_successors(G, NId)
for l, h in hops(b, NId):
if h is 0: continue
if not l + 1 in DistToCntH:
DistToCntH[l + 1] = h
else:
DistToCntH[l + 1] += h
DistNbrsPdfV = {}
SumPathL = 0.0
PathCnt = 0.0
for i in DistToCntH.keys():
DistNbrsPdfV[i] = DistToCntH[i]
SumPathL += i * DistToCntH[i]
PathCnt += DistToCntH[i]
oDistNbrsPdfV = collections.OrderedDict(sorted(DistNbrsPdfV.items()))
CdfV = oDistNbrsPdfV
for i in range(1, len(CdfV)):
if not i + 1 in CdfV:
CdfV[i + 1] = 0
CdfV[i + 1] = CdfV[i] + CdfV[i + 1]
EffPairs = P * CdfV[next(reversed(CdfV))]
for ValN in CdfV.keys():
if CdfV[ValN] > EffPairs: break
if ValN >= len(CdfV): return next(reversed(CdfV))
if ValN is 0: return 1
# interpolate
DeltaNbrs = CdfV[ValN] - CdfV[ValN - 1];
if DeltaNbrs is 0: return ValN;
return ValN - 1 + (EffPairs - CdfV[ValN - 1]) / DeltaNbrs
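# A minimal, hedged usage sketch for bfs_eff_diam: the P-quantile ("effective")
# diameter estimated from BFS trees rooted at up to NTestNodes random nodes.
# Assumes the old NetworkX 1.x API used throughout this module; the toy graph
# and helper name are illustrative only.
def _example_bfs_eff_diam():
    import networkx as nx
    g = nx.barabasi_albert_graph(256, 3, seed=0)
    return bfs_eff_diam(g, 20, 0.9)   # a small float; the exact value depends on the sampled roots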
def draw_diam_plot(orig_g, mG):
df = pd.DataFrame(mG)
gD = bfs_eff_diam(orig_g, 20, .9)
ori_degree_seq = []
for i in range(0, len(max(mG))):
ori_degree_seq.append(gD)
plt.fill_between(df.columns, df.mean() - df.sem(), df.mean() + df.sem(), color='blue', alpha=0.2, label="se")
h, = plt.plot(df.mean(), color='blue', aa=True, linewidth=4, ls='--', label="H*")
orig, = plt.plot(ori_degree_seq, color='black', linewidth=2, ls='-', label="H")
plt.title('Diameter Plot')
plt.ylabel('Diameter')
plt.xlabel('Growth')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
plt.legend([orig, h], ['$H$', 'HRG $H^*$'], loc=4)
# fig = plt.gcf()
# fig.set_size_inches(5, 4, forward=True)
plt.show()
def draw_graphlet_plot(orig_g, mG):
df = pd.DataFrame(mG)
width = .25
N = 11
dforig = pd.DataFrame(orig_g)
means = (dforig.mean()['e0'], dforig.mean()['e1'], dforig.mean()['e2'], dforig.mean()['e2c'], dforig.mean()['tri'],
dforig.mean()['p3'], dforig.mean()['star'], dforig.mean()['tritail'], dforig.mean()['square'],
dforig.mean()['squarediag'], dforig.mean()['k4'])
sem = (dforig.sem()['e0'], dforig.sem()['e1'], dforig.sem()['e2'], dforig.sem()['e2c'], dforig.sem()['tri'],
dforig.sem()['p3'], dforig.sem()['star'], dforig.sem()['tritail'], dforig.sem()['square'],
dforig.sem()['squarediag'], dforig.sem()['k4'])
ind = np.arange(N)
fig, ax = plt.subplots()
rects = ax.bar(ind + .02, means, width - .02, color='k', yerr=sem)
means = (df.mean()['e0'], df.mean()['e1'], df.mean()['e2'], df.mean()['e2c'], df.mean()['tri'], df.mean()['p3'],
df.mean()['star'], df.mean()['tritail'], df.mean()['square'], df.mean()['squarediag'], df.mean()['k4'])
sem = (
df.sem()['e0'], df.sem()['e1'], df.sem()['e2'], df.sem()['e2c'], df.sem()['tri'], df.sem()['p3'],
df.sem()['star'],
df.sem()['tritail'], df.sem()['square'], df.sem()['squarediag'], df.sem()['k4'])
rects = ax.bar(ind + width + .02, means, width - .02, color='b', yerr=sem)
plt.ylim(ymin=0)
# fig = plt.gcf()
# fig.set_size_inches(5, 3, forward=True)
plt.show()
def draw_degree_rank_plot(orig_g, mG):
ori_degree_seq = sorted(nx.degree(orig_g).values(), reverse=True) # degree sequence
deg_seqs = []
for newg in mG:
deg_seqs.append(sorted(nx.degree(newg).values(), reverse=True)) # degree sequence
df = pd.DataFrame(deg_seqs)
plt.xscale('log')
plt.yscale('log')
plt.fill_between(df.columns, df.mean() - df.sem(), df.mean() + df.sem(), color='blue', alpha=0.2, label="se")
h, = plt.plot(df.mean(), color='blue', aa=True, linewidth=4, ls='--', label="H*")
orig, = plt.plot(ori_degree_seq, color='black', linewidth=4, ls='-', label="H")
plt.title('Degree Distribution')
plt.ylabel('Degree')
plt.ylabel('Ordered Vertices')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
plt.legend([orig, h], ['$H$', 'HRG $H^*$'], loc=3)
# fig = plt.gcf()
# fig.set_size_inches(5, 4, forward=True)
plt.show()
def draw_network_value(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
"""
Network values: The distribution of eigenvector components (indicators of "network value")
    associated with the largest eigenvalue of the graph adjacency matrix has also been found to be
skewed (Chakrabarti et al., 2004).
"""
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in orig_g_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "orig"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in pHRG_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "phrg"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in HRG_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "hrg"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in chunglu_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "cl"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in kron_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "kron"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
def degree_distribution_multiples(graphs):
if graphs is not None:
dorig = pd.DataFrame()
for g in graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
dorig = pd.concat([dorig, gb], axis=1) # Appends to bottom new DFs
return dorig
def hop_plot_multiples(graphs):
if graphs is not None:
m_hops_ar = []
for g in graphs:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
hops_df = pd.DataFrame(m_hops_ar)
return hops_df.transpose()
def clustering_coefficients_single(graph):
g = graph
dorig = pd.DataFrame()
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
#dorig = pd.concat([dorig, df])
return df
def clustering_coefficients_multiples(graphs):
if graphs is not None:
dorig = pd.DataFrame()
for g in graphs:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
return dorig
def assortativity_coefficients_multiples(graphs):
if len(graphs) is not 0:
dorig = pd.DataFrame()
for g in graphs:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
return dorig
def kcore_decomposition_multiples(graphs):
dorig = pd.DataFrame()
for g in graphs:
g.remove_edges_from(g.selfloop_edges())
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
return dorig
def eigenvector_multiples(graphs):
#
# dorig = pd.DataFrame()
# for g in graphs:
# # d = nx.eigenvector_centrality(g)
# d = nx.eigenvector_centrality_numpy(g)
# df = pd.DataFrame.from_dict(d.items())
# gb = df.groupby(by=[1])
# dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
# # print "orig"
# # print dorig.mean(axis=1)
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in graphs] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
return df
def network_properties(orig, net_mets, synth_graphs_lst, name='', out_tsv=False):
'''
compute network properties
orig: original graph
net_mets: network metrics list
graphs_lst: graphs to compute degree
out_tsv: if True output tsv file for PGFPlots
'''
results_d = {}
gs = gridspec.GridSpec(3,3)
ax0 = plt.subplot(gs[0, :])
ax1 = plt.subplot(gs[1, 0])
ax2 = plt.subplot(gs[1, 1])
ax3 = plt.subplot(gs[1, 2])
ax4 = plt.subplot(gs[2, 0])
ax5 = plt.subplot(gs[2, 1])
ax6 = plt.subplot(gs[2, 2])
plt.suptitle(name)
import os
if not os.path.exists('./Results'):
os.makedirs('./Results')
if 'degree' in net_mets:
print 'Degree'
orig__Deg = degree_distribution_multiples(orig)
orig__Deg.mean(axis=1).plot(ax=ax0,marker='.', ls="None", markeredgecolor="w", color='b')
synth_Deg = degree_distribution_multiples(synth_graphs_lst)
synth_Deg.to_csv('Results/degree_synth_{}.tsv'.format(name),sep='\t',header=None, index=False)
if os.path.exists('Results/degree_synth_{}.tsv'.format(name)): print 'saved to disk'
synth_Deg.mean(axis=1).plot(ax=ax0,alpha=0.5, color='r')
synth_Deg.max(axis=1).plot(ax=ax0,alpha=0.2, color='r')
synth_Deg.min(axis=1).plot(ax=ax0,alpha=0.2, color='r')
orig__Deg.mean(axis=1).to_csv('Results/degree_orig_{}.tsv'.format(name),sep='\t')
synth_Deg.mean(axis=1).to_csv('Results/degree_hrg_{}.tsv'.format(name),sep='\t')
ax0.set_title('Degree distributuion', y=0.9)
#ax0.set_xscale('log')
#ax0.set_yscale('log')
xdat = synth_Deg.index.values
ydat = synth_Deg.median(axis=1).values
zdat = synth_Deg.std(axis=1).values
df1 = pd.DataFrame()
df1['xdat'] = xdat
df1['ydat'] = ydat
df1['ysig'] = zdat
# df2 = pd.DataFrame()
# df2['s_med'] = zdat
# df2['s_std'] = wdat
# df = df1.join(df2, how='outer')
df1.to_csv('Results/deg_dist_{}.tsv'.format(name),sep='\t', header=None, index=False)
if os.path.exists('Results/deg_dist_{}.tsv'.format(name)):
print '... file written:','Results/deg_dist_{}.tsv'.format(name)
if 'hops' in net_mets:
print 'Hops'
orig__Hop_Plot = hop_plot_multiples(orig)
synth_Hop_Plot = hop_plot_multiples(synth_graphs_lst)
orig__Hop_Plot.mean(axis=1).plot(ax=ax1, marker='o', color='b')
synth_Hop_Plot.mean(axis=1).plot(ax=ax1, color='r')
synth_Hop_Plot.max(axis=1).plot(ax=ax1, color='r', alpha=0.2)
synth_Hop_Plot.min(axis=1).plot(ax=ax1, color='r', alpha=0.2)
ax1.set_title('Hop Plot', y=0.9)
if out_tsv: orig__Hop_Plot.mean(axis=1).to_csv('Results/hops_orig_{}.tsv'.format(name),sep='\t')
if out_tsv: synth_Hop_Plot.mean(axis=1).to_csv('Results/hops_hrg_{}.tsv'.format(name),sep='\t')
print (orig__Hop_Plot.mean(axis=1).to_string())
print (synth_Hop_Plot.mean(axis=1).to_string())
if 'clust' in net_mets:
print 'Clustering Coef'
orig__clust_coef = clustering_coefficients_multiples(orig)
synth_clust_coef = clustering_coefficients_multiples(synth_graphs_lst)
gb = orig__clust_coef.groupby(['k'])
gb['cc'].mean().plot(ax=ax2, marker='o', ls="None", markeredgecolor="w", color='b', alpha=0.8)
if out_tsv:
gb['cc'].mean().to_csv('Results/clust_orig_{}.tsv'.format(name),sep='\t')
print (gb['cc'].mean().to_string())
results_d['clust'] = {"orig": list(gb['cc'].mean())}
gb = synth_clust_coef.groupby(['k'])
gb['cc'].mean().plot(ax=ax2, marker='o', ls="None", markeredgecolor="w", color='r', alpha=0.8 )
ax2.set_title('Avg Clustering Coefficient', y=0.9)
if out_tsv:
gb['cc'].mean().to_csv('Results/clust_hrg_{}.tsv'.format(name),mode='a', sep='\t')
print (gb['cc'].mean().to_string())
results_d['clust'] = {"synth": list(gb['cc'].mean())}
return "end"
if 'assort' in net_mets:
print 'Assortativity'
orig__assort = assortativity_coefficients_multiples(orig)
synth_assort = assortativity_coefficients_multiples(synth_graphs_lst)
gb = orig__assort.groupby(['k'])
gb[1].mean().plot(ax=ax3, marker='o', ls="None", markeredgecolor="w", color='b', alpha=0.8 )
gb[1].mean().to_csv('Results/assort_orig_{}.tsv'.format(name),sep='\t')
gb = synth_assort.groupby(['k'])
gb[1].mean().plot(ax=ax3, marker='o', ls="None", markeredgecolor="w", color='r', alpha=0.8 )
ax3.set_title('Assortativity', y=0.9)
gb[1].mean().to_csv('Results/assort_hrg_{}.tsv'.format(name),sep='\t')
if 'kcore' in net_mets:
print 'kcore_decomposition'
orig__kcore = kcore_decomposition_multiples(orig)
synth_kcore = kcore_decomposition_multiples(synth_graphs_lst)
orig__kcore.plot(ax=ax4, marker='o', ls="None", markeredgecolor="w", color='b', alpha=0.8 )
synth_kcore.mean(axis=1).plot(ax=ax4, marker='o', ls="None", markeredgecolor="w", color='r', alpha=0.8 )
synth_kcore.max(axis=1).plot(ax=ax4, color='r', alpha=0.2 )
synth_kcore.min(axis=1).plot(ax=ax4, color='r', alpha=0.2 )
ax4.set_title('K-Core', y=0.9)
orig__kcore.to_csv('Results/kcore_orig_{}.tsv'.format(name),sep='\t')
synth_kcore.mean(axis=1).to_csv('Results/kcore_hrg_{}.tsv'.format(name),sep='\t')
if 'eigen' in net_mets:
print 'eigenvec'
orig__eigenvec = eigenvector_multiples(orig)
synth_eigenvec = eigenvector_multiples(synth_graphs_lst)
orig__eigenvec= orig__eigenvec.transpose()
orig__eigenvec.plot(ax=ax5, marker='o', ls="None", markeredgecolor="w", color='b', alpha=0.8)
orig__eigenvec.mean(axis=1).to_csv('Results/eigenv_orig_{}.tsv'.format(name),sep='\t')
synth_eigenvec= synth_eigenvec.transpose()
synth_eigenvec.mean(axis=1).plot(ax=ax5, marker='s', ls="None", markeredgecolor="w", color='r', alpha=0.8)
synth_eigenvec.mean(axis=1).to_csv('Results/eigenv_hrg_{}.tsv'.format(name),sep='\t')
ax5.set_title('eigenvector', y=0.9)
import pprint as pp
if 'gcd' in net_mets:
print 'GCD'
ax6.set_title('GCD', y=0.9)
gcd_hrg = []
df_g = external_rage(orig[0],name) # original graph
for synthG in synth_graphs_lst:
gcd_network = external_rage(synthG,name)
# rgfd = tijana_eval_rgfd(df_g, gcd_network) ## what is this?
gcm_g = tijana_eval_compute_gcm(df_g)
gcm_h = tijana_eval_compute_gcm(gcd_network)
gcd_hrg.append(tijana_eval_compute_gcd(gcm_g, gcm_h))
gcd_hrg_mean = np.mean(gcd_hrg)
gcd_hrg_std = np.std(gcd_hrg)
ax6.bar([1], gcd_hrg_mean, width=0.5, yerr=gcd_hrg_std)
# http://blog.bharatbhole.com/creating-boxplots-with-matplotlib/
# ax6.set_xticklabels(['HRG']) ## Custom x-axis labels
ax6.get_xaxis().tick_bottom() ## Remove top axes and right axes ticks
ax6.get_yaxis().tick_left()
ax6.set_xlim(0, 5)
if out_tsv:
with open ('Results/gcd_{}.tsv'.format(name), 'w') as f:
f.write('{}\t{}\n'.format(gcd_hrg_mean,gcd_hrg_std))
print ('{}\t{}\n'.format(gcd_hrg_mean,gcd_hrg_std))
"""oufigname = '/tmp/outfig_{}.pdf'.format(name)
plt.savefig(oufigname, bbox_inches='tight')
if os.path.exists(oufigname): print 'Output: ',oufigname
"""
return results_d
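# A minimal, hedged usage sketch for network_properties: compare one original
# graph against a few synthetic graphs on a subset of the metrics handled
# above.  Assumes the old NetworkX 1.x API used throughout this module; the
# graphs, metric subset, and name are illustrative only.
def _example_network_properties():
    import networkx as nx
    orig = [nx.karate_club_graph()]
    synth = [nx.erdos_renyi_graph(34, 0.14, seed=s) for s in range(3)]
    return network_properties(orig, ['degree', 'hops'], synth, name='toy', out_tsv=False)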
def draw_degree_probability_distribution(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
print 'draw_degree_probability_distribution'
if orig_g_M is not None:
dorig = pd.DataFrame()
for g in orig_g_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
if HRG_M is not None:
dorig = pd.DataFrame()
for g in HRG_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "hrgm"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
if pHRG_M is not None:
dorig = pd.DataFrame()
for g in pHRG_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "phrgm"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in chunglu_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "cl"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/float(75)))
print zz, sa
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
#print len(kron_M), kron_M
for g in kron_M:
print "---=>",len(g)
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "kron"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/float(75)))
for x in range(1, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
def draw_eigenvector_probability_distribution(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
dorig = pd.DataFrame()
for g in orig_g_M:
d = nx.eigenvector_centrality(g)
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "orig"
print dorig.mean(axis=1)
dorig = pd.DataFrame()
for g in HRG_M:
d = nx.eigenvector_centrality(g)
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "hrgm"
print dorig.mean(axis=1)
dorig = pd.DataFrame()
for g in pHRG_M:
d = nx.eigenvector_centrality(g)
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "phrgm"
print dorig.mean(axis=1)
dorig = pd.DataFrame()
for g in chunglu_M:
d = nx.eigenvector_centrality(g)
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "cl"
print dorig.mean(axis=1)
dorig = pd.DataFrame()
for g in kron_M:
d = nx.eigenvector_centrality(g)
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "kron"
print dorig.mean(axis=1)
def draw_hop_plot(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
m_hops_ar = []
for g in chunglu_M:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
print "Chung Lu hops finished"
chunglu_df = pd.DataFrame(m_hops_ar)
m_hops_ar = []
for g in HRG_M:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
print "HRG hops finished"
hrg_df = pd.DataFrame(m_hops_ar)
m_hops_ar = []
for g in pHRG_M:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
print "PHRG hops finished"
phrg_df = pd.DataFrame(m_hops_ar)
m_hops_ar = []
for g in kron_M:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
print "Kron hops finished"
kron_df = pd.DataFrame(m_hops_ar)
## original plot
m_hops_ar = []
for g in orig_g_M:
c = get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
dorig = pd.DataFrame(m_hops_ar)
if 0:
# plt.fill_between(dorig.columns, dorig.mean() - dorig.sem(), dorig.mean() + dorig.sem(), color='black', alpha=0.2, label="se")
orig, = plt.plot(dorig.mean(), color='black', marker="o", markersize=10, aa=False, linewidth=3, ls='-', label="H")
print "Hop plot, BA (256, 3)"
print "H"
for x in range(0, len(dorig.mean().values)):
print "(" + str(dorig.mean().index[x]) + ", " + str(dorig.mean().values[x]) + ")"
# plt.fill_between(phrg_df.columns, phrg_df.mean() - phrg_df.sem(), phrg_df.mean() + phrg_df.sem(), color='blue', alpha=0.2, label="se")
phrg_h, = plt.plot(phrg_df.mean(), color='blue', marker="d", aa=False, linewidth=3, ls='-', label="PHRG")
print "PHRG"
for x in range(0, len(phrg_df.mean().values)):
print "(" + str(phrg_df.mean().index[x]) + ", " + str(phrg_df.mean().values[x]) + ")"
# plt.fill_between(hrg_df.columns, hrg_df.mean() - hrg_df.sem(), hrg_df.mean() + hrg_df.sem(), color='red', alpha=0.2, label="se")
hrg_h, = plt.plot(hrg_df.mean(), color='red', marker="^", aa=False, linewidth=3, ls='-', label="HRG")
print "HRG"
for x in range(0, len(hrg_df.mean().values)):
print "(" + str(hrg_df.mean().index[x]) + ", " + str(hrg_df.mean().values[x]) + ")"
# plt.fill_between(chunglu_df.columns, chunglu_df.mean() - chunglu_df.sem(), chunglu_df.mean() + chunglu_df.sem(), color='green', alpha=0.2, label="se")
cl_h, = plt.plot(chunglu_df.mean(), color='green', marker="v", aa=False, linewidth=3, ls='-', label="Chung-Lu")
print "CL"
for x in range(0, len(chunglu_df.mean().values)):
print "(" + str(chunglu_df.mean().index[x]) + ", " + str(chunglu_df.mean().values[x]) + ")"
if 0:
# plt.fill_between(kron_df.columns, kron_df.mean() - kron_df.sem(), kron_df.mean() + kron_df.sem(), color='purple', alpha=0.2, label="se")
kron_h, = plt.plot(kron_df.mean(), color='purple', marker="s", aa=False, linewidth=3, ls='-', label="Kronecker")
print "K"
for x in range(0, len(kron_df.mean().values)):
print "(" + str(kron_df.mean().index[x]) + ", " + str(kron_df.mean().values[x]) + ")"
if 0:
plt.title('Hop Plot')
plt.ylabel('Reachable Pairs')
plt.xlabel('Number of Hops')
# plt.ylim(ymax=max(dorig.values()) + max(dorig.values()) * .10)
plt.legend([orig, phrg_h, hrg_h, cl_h, kron_h], ['$H$', 'PHRG', 'HRG', 'Chung-Lu', 'Kron'], loc=1)
# fig = plt.gcf()
# fig.set_size_inches(5, 4, forward=True)
plt.show()
def draw_assortativity_coefficients(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
if len(orig_g_M) is not 0:
dorig = pd.DataFrame()
for g in orig_g_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
if len(chunglu_M) is not 0:
dorig = pd.DataFrame()
for g in chunglu_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "cl"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
if len(HRG_M) is not 0:
dorig = pd.DataFrame()
for g in HRG_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "hrg"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
if len(pHRG_M) is not 0:
dorig = pd.DataFrame()
for g in pHRG_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "phrg"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
if len(kron_M) is not 0:
dorig = pd.DataFrame()
for g in kron_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "kron"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
def draw_clustering_coefficients(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
if len(orig_g_M) is not 0:
dorig = pd.DataFrame()
for g in orig_g_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
if len(chunglu_M) is not 0:
dorig = pd.DataFrame()
for g in chunglu_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "cl"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
if len(HRG_M) is not 0:
dorig = pd.DataFrame()
for g in HRG_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "hrg"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
if len(pHRG_M) is not 0:
dorig = pd.DataFrame()
for g in pHRG_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "phrgm"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
if len(kron_M) is not 0:
dorig = pd.DataFrame()
for g in kron_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "kron"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
def draw_kcore_decomposition(orig_g_M, chunglu_M, HRG_M, pHRG_M, kron_M):
dorig = pd.DataFrame()
for g in orig_g_M:
g.remove_edges_from(g.selfloop_edges())
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in pHRG_M:
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "phrg"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in HRG_M:
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "hrg"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in chunglu_M:
g.remove_edges_from(g.selfloop_edges())
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "cl"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in kron_M:
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "kron"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
def external_rage(G,netname):
import subprocess
import networkx as nx
from pandas import DataFrame
from os.path import expanduser
# giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
giant_nodes = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)
G = nx.subgraph(G, giant_nodes[0])
tmp_file = "tmp_{}.txt".format(netname)
with open(tmp_file, 'w') as tmp:
for e in G.edges():
tmp.write(str(int(e[0])+1) + ' ' + str(int(e[1])+1) + '\n')
# args = ("wine", "./RAGE.exe", tmp_file)
# args = ("/Volumes/Lexar/SeptBackupMBP/ToolSet/rage/Source_RAGE_unix/RAGE", tmp_file)
# args = ("/data/cpennycu/rage/Source_RAGE_unix/RAGE", tmp_file)
# args = ("/home/saguinag/Software/rage/Source_RAGE_unix/RAGE", tmp_file)
if "Users" in expanduser('~').split('/'):
args = ("/Users/saguinag/Research/rage/Source_RAGE_unix/RAGE", tmp_file)
else:
args = ("/home/saguinag/Software/rage/Source_RAGE_unix/RAGE", tmp_file)
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
# Results are hardcoded in the exe
df = DataFrame.from_csv("./Results/UNDIR_RESULTS_tmp_{}.csv".format(netname), header=0, sep=',', index_col=0)
df = df.drop('ASType', 1)
return df
def tijana_eval_rgfd(G_df, H_df):
T_G = 0.0
T_H = 0.0
RGFD = 0.0
for column in G_df:
T_G += sum(G_df[column][:])
for column in H_df:
T_H += sum(H_df[column][:])
for column in G_df:
N_G_i = sum(G_df[column][:])
N_H_i = sum(H_df[column][:])
if N_G_i == 0 or N_H_i == 0:
print 0;
RGFD += np.log10(N_G_i / T_G) - np.log10(N_H_i / T_H)
return RGFD
def tijana_eval_compute_gcm(G_df):
import scipy.stats
l = len(G_df.columns)
gcm = np.zeros((l, l))
i = 0
for column_G in G_df:
j = 0
for column_H in G_df:
gcm[i, j] = scipy.stats.spearmanr(G_df[column_G].tolist(), G_df[column_H].tolist())[0]
if scipy.isnan(gcm[i, j]):
gcm[i, j] = 1.0
j += 1
i += 1
return gcm
def tijana_eval_compute_gcd(gcm_g, gcm_h):
import math
if len(gcm_h) != len(gcm_g):
raise "Graphs must be same size"
s = 0
for i in range(0, len(gcm_g)):
for j in range(i, len(gcm_h)):
s += math.pow((gcm_g[i, j] - gcm_h[i, j]), 2)
gcd = math.sqrt(s)
return gcd
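# A minimal, hedged sketch of the GCM/GCD pipeline above without the external
# RAGE binary: any two DataFrames with matching graphlet-count columns can
# stand in for `external_rage` output.  The random frames and helper name are
# illustrative only.
def _example_gcd():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    cols = ['g%d' % i for i in range(5)]
    df_g = pd.DataFrame(rng.poisson(5, size=(50, 5)), columns=cols)
    df_h = pd.DataFrame(rng.poisson(5, size=(50, 5)), columns=cols)
    return tijana_eval_compute_gcd(tijana_eval_compute_gcm(df_g),
                                   tijana_eval_compute_gcm(df_h))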
def save_degree_probability_distribution(orig_g_M, chunglu_M, pHRG_M, kron_M,in_graph_str=''):
from datetime import datetime
dorig = pd.DataFrame()
for g in orig_g_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
gb.columns=['cnt']
gb['k']=gb.index
print gb.head()
dorig = pd.concat([dorig, gb], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
dorig['pk'] = dorig['cnt']/float(g.number_of_nodes())
out_path = '../Results/orig_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
# dorig = pd.DataFrame()
# for g in HRG_M:
# d = g.degree()
# df = pd.DataFrame.from_dict(d.items())
# gb = df.groupby(by=[1])
# dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
# print "hrgm"
# if not dorig.empty :
# zz = len(dorig.mean(axis=1).values)
# sa = int(math.ceil(zz/75))
# for x in range(0, len(dorig.mean(axis=1).values), sa):
# print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in pHRG_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
gb.columns=['cnt']
# gb['k']=gb.index
dorig = pd.concat([dorig, gb], axis=1) # Appends to bottom new DFs
print "phrgm"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/phrg_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
dorig = pd.DataFrame()
for g in chunglu_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
gb.columns=['cnt']
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "cl"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/clgm_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
dorig = pd.DataFrame()
for g in kron_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
gb.columns=['cnt']
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "kron"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/kpgm_kdist_{}.tsv'.format(in_graph_str)#str(datetime.now()).replace(' ','_'))
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
#save_eigenvector_centrality
def save_eigenvector_centrality(orig_g_M, chunglu_M, pHRG_M, kron_M,in_graph_str=''):
#from datetime import datetime
dorig = pd.DataFrame()
for g in orig_g_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
gb.columns=['cnt']
gb['k']=gb.index
print gb.head()
dorig = pd.concat([dorig, gb], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
dorig['pk'] = dorig['cnt']/float(g.number_of_nodes())
out_path = '../Results/orig_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
# dorig = pd.DataFrame()
# for g in HRG_M:
# d = g.degree()
# df = pd.DataFrame.from_dict(d.items())
# gb = df.groupby(by=[1])
# dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
# print "hrgm"
# if not dorig.empty :
# zz = len(dorig.mean(axis=1).values)
# sa = int(math.ceil(zz/75))
# for x in range(0, len(dorig.mean(axis=1).values), sa):
# print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in pHRG_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
gb.columns=['cnt']
# gb['k']=gb.index
dorig = pd.concat([dorig, gb], axis=1) # Appends to bottom new DFs
print "phrgm"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/phrg_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
dorig = pd.DataFrame()
for g in chunglu_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
gb.columns=['cnt']
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "cl"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/clgm_kdist_{}.tsv'.format(in_graph_str)
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
dorig = pd.DataFrame()
for g in kron_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
gb.columns=['cnt']
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "kron"
if not dorig.empty :
"""
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/75))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
"""
dorig['pk'] = dorig.mean(axis=1)/float(g.number_of_nodes())
dorig['k'] = dorig.index # print dorig.head()
out_path = '../Results/kpgm_kdist_{}.tsv'.format(in_graph_str)#str(datetime.now()).replace(' ','_'))
dorig[['k','pk']].to_csv(out_path, sep='\t', index=False, header=True)
| gpl-3.0 |
johnveitch/cpnest | docs/conf.py | 1 | 5836 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CPNest documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 7 13:57:03 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('../cpnest'))
import cpnest
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CPNest'
copyright = '2017-2021, W. Del Pozzo, J. Veitch'
author = 'W. Del Pozzo, J. Veitch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cpnest.__version__
# The full version, including alpha/beta/rc tags.
release = cpnest.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CPNestdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CPNest.tex', 'CPNest Documentation',
'W. Del Pozzo, J. Veitch', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cpnest', 'CPNest Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CPNest', 'CPNest Documentation',
author, 'CPNest', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None,
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.sourceforge.net', None)
}
| mit |
zhangns/bilibili-captcha | helper.py | 2 | 3775 | # Generic helper functions
import time
import random
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import scipy as sp
import scipy.misc
# Color map for grayscale images
cm_greys = plt.cm.get_cmap('Greys')
# A generic function timer
def time_func(tag, func):
t0 = time.time() if tag else None
ret = func()
if tag:
t1 = time.time()
print('Time for {}: {}'.format(tag, t1 - t0))
return ret
# Compose a single-argument function n times
def repeat(func, n):
def ret(x):
for i in range(n):
x = func(x)
return x
return ret
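# Illustrative usage of repeat() (not part of the original module; shown as a
# doctest-style comment): halving 16 three times gives 16 -> 8 -> 4 -> 2.
# >>> repeat(lambda v: v // 2, 3)(16)
# 2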
# E.g. sort_by_occurrence(np.array([1, 3, 3, 1, 2, 2, 2, 3, 4, 2]))
# Return: array([2, 3, 1, 4])
def sort_by_occurrence(arr):
u, counts = np.unique(arr, return_counts=True)
sort_index = counts.argsort()[::-1]
return u[sort_index]
# # https://en.wikipedia.org/wiki/Von_Neumann_neighborhood
# def manhattan_neighbors(r=1):
# neighbors = []
# for dy in range(-r, r + 1):
# xx = r - abs(dy)
# for dx in range(-xx, xx + 1):
# if dy == 0 and dx == 0:
# continue
# neighbors.append((dy, dx))
# return neighbors
# Show image in matplotlib window
def show_image(img, cmap=cm_greys, title=None, interp=None):
plt.clf()
plt.axis('off')
plt.imshow(img, cmap=cmap, interpolation=interp)
if title:
plt.title(title)
plt.show()
# https://en.wikipedia.org/wiki/Lennard-Jones_potential
def _lj(r, delta=4):
return np.power(delta / r, 12) - 2 * np.power(delta / r, 6)
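# Quick sanity check for _lj (illustrative, not in the original code): with the
# default delta=4 the potential attains its minimum value of -1 at r == delta.
# >>> _lj(4.0)
# -1.0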
# https://en.wikipedia.org/wiki/Simulated_annealing
# Not useful to the program, but produces an impressive result
def anneal(img, num_steps=1000):
np.seterr(divide='ignore', invalid='ignore')
height, width = img.shape
new_img = np.zeros((height, width, 3))
for i in range(3):
new_img[:, :, i] = 1 - img.copy()
positions = []
for y in range(height):
for x in range(width):
if img[y, x] == 1:
new_img[y, x, 0] = 1
positions.append((y, x))
positions = np.array(positions)
num_positions = positions.shape[0]
print('{} Positions'.format(num_positions))
particles = np.ones(num_positions, dtype=bool)
# plt.ion()
# show_image(new_img)
E = 0
# step_list= []
# E_list = []
# for p in range(num_positions):
# for q in range(p + 1, num_positions):
# E += _lj(la.norm(positions[q] - positions[p]))
for step in range(num_steps):
beta = (3 + step / 1000) * 1e-6
# Choose a position randomly, and invert the state
p = np.random.randint(num_positions)
y, x = positions[p]
# noinspection PyTypeChecker
delta_energy = np.nansum(
_lj(la.norm(positions[particles] - positions[p], axis=1)))
if particles[p]:
delta_energy = -delta_energy
if delta_energy < 0:
accept = True
else:
accept = (random.random() < np.exp(-beta * delta_energy))
if accept:
E += delta_energy
particles[p] = not particles[p]
new_img[y, x, 0] = particles[p]
if step % 50 == 0:
print('Step {}. beta {}. E {}'.format(step, beta, E))
# step_list.append(step)
# E_list.append(E)
# show_image(new_img, title=step, interp='none')
# plt.pause(0.1)
# plt.ioff()
# plt.clf()
# plt.plot(step_list, E_list, '*-')
# plt.xlabel('step')
# plt.ylabel('Energy')
# plt.show()
return new_img
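# Illustrative usage sketch for anneal() (hypothetical values, not from the
# original module): anneal a small synthetic binary image for a few hundred
# steps and display the resulting RGB array.
# >>> demo = np.zeros((20, 20))
# >>> demo[5:15, 5:15] = 1
# >>> rgb = anneal(demo, num_steps=200)    # doctest: +SKIP
# >>> show_image(rgb)                      # doctest: +SKIP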
# resize an image to a new size
def resize_image(image, height, width):
return sp.misc.imresize(
image,
(height, width)
)
| mit |
meduz/scikit-learn | examples/model_selection/randomized_search.py | 35 | 3287 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
| bsd-3-clause |
theoryno3/scikit-learn | sklearn/datasets/base.py | 9 | 18347 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
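# Illustrative usage (the paths shown are hypothetical; the real location
# depends on the SCIKIT_LEARN_DATA environment variable or the data_home
# argument):
# >>> get_data_home()                      # doctest: +SKIP
# '/home/user/scikit_learn_data'
# >>> get_data_home('/tmp/sklearn_cache')  # doctest: +SKIP
# '/tmp/sklearn_cache'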
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
        'data', the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
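# Illustrative usage sketch for load_files() (the container path is
# hypothetical): each subfolder name becomes a class label and the text files
# inside it become samples.
# >>> bunch = load_files('/path/to/container_folder', encoding='utf-8')  # doctest: +SKIP
# >>> len(bunch.data) == len(bunch.target)                               # doctest: +SKIP
# True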
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
    ----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
    --------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
yyjiang/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer number of dictionary atoms
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                      n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/utils/tests/test_validation.py | 56 | 18600 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
)
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    # Check that ValueError is raised when a non-estimator instance is passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
rs2/pandas | pandas/tests/arithmetic/test_interval.py | 1 | 10291 | import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
(Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
(
timedelta_range("0 days", periods=3).insert(4, pd.NaT),
timedelta_range("1 day", periods=3).insert(4, pd.NaT),
),
(
date_range("20170101", periods=3).insert(4, pd.NaT),
date_range("20170102", periods=3).insert(4, pd.NaT),
),
(
date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
@pytest.fixture
def array(left_right_dtypes):
"""
Fixture to generate an IntervalArray of various dtypes containing NA if possible
"""
left, right = left_right_dtypes
return IntervalArray.from_arrays(left, right)
def create_categorical_intervals(left, right, closed="right"):
return Categorical(IntervalIndex.from_arrays(left, right, closed))
def create_series_intervals(left, right, closed="right"):
return Series(IntervalArray.from_arrays(left, right, closed))
def create_series_categorical_intervals(left, right, closed="right"):
return Series(Categorical(IntervalIndex.from_arrays(left, right, closed)))
class TestComparison:
@pytest.fixture(params=[operator.eq, operator.ne])
def op(self, request):
return request.param
@pytest.fixture(
params=[
IntervalArray.from_arrays,
IntervalIndex.from_arrays,
create_categorical_intervals,
create_series_intervals,
create_series_categorical_intervals,
],
ids=[
"IntervalArray",
"IntervalIndex",
"Categorical[Interval]",
"Series[Interval]",
"Series[Categorical[Interval]]",
],
)
def interval_constructor(self, request):
"""
Fixture for all pandas native interval constructors.
To be used as the LHS of IntervalArray comparisons.
"""
return request.param
def elementwise_comparison(self, op, array, other):
"""
Helper that performs elementwise comparisons between `array` and `other`
"""
other = other if is_list_like(other) else [other] * len(array)
return np.array([op(x, y) for x, y in zip(array, other)])
def test_compare_scalar_interval(self, op, array):
# matches first interval
other = array[0]
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# matches on a single endpoint but not both
other = Interval(array.left[0], array.right[1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = Interval(0, 1, closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_na(self, op, array, nulls_fixture, request):
result = op(array, nulls_fixture)
expected = self.elementwise_comparison(op, array, nulls_fixture)
if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int64"):
mark = pytest.mark.xfail(
reason="broken for non-integer IntervalArray; see GH 31882"
)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
0,
1.0,
True,
"foo",
Timestamp("2017-01-01"),
Timestamp("2017-01-01", tz="US/Eastern"),
Timedelta("0 days"),
Period("2017-01-01", "D"),
],
)
def test_compare_scalar_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval(self, op, array, interval_constructor):
# same endpoints
other = interval_constructor(array.left, array.right)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# different endpoints
other = interval_constructor(array.left[::-1], array.right[::-1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# all nan endpoints
other = interval_constructor([np.nan] * 4, [np.nan] * 4)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval_mixed_closed(
self, op, interval_constructor, closed, other_closed
):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = interval_constructor(range(2), range(1, 3), closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
(
Interval(0, 1),
Interval(Timedelta("1 day"), Timedelta("2 days")),
Interval(4, 5, "both"),
Interval(10, 20, "neither"),
),
(0, 1.5, Timestamp("20170103"), np.nan),
(
Timestamp("20170102", tz="US/Eastern"),
Timedelta("2 days"),
"baz",
pd.NaT,
),
],
)
def test_compare_list_like_object(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_nan(self, op, array, nulls_fixture, request):
other = [nulls_fixture] * 4
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
if nulls_fixture is pd.NA and array.dtype.subtype != "i8":
reason = "broken for non-integer IntervalArray; see GH 31882"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
np.arange(4, dtype="int64"),
np.arange(4, dtype="float64"),
date_range("2017-01-01", periods=4),
date_range("2017-01-01", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
period_range("2017-01-01", periods=4, freq="D"),
Categorical(list("abab")),
Categorical(date_range("2017-01-01", periods=4)),
pd.array(list("abcd")),
pd.array(["foo", 3.14, None, object()]),
],
ids=lambda x: str(x.dtype),
)
def test_compare_list_like_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("length", [1, 3, 5])
@pytest.mark.parametrize("other_constructor", [IntervalArray, list])
def test_compare_length_mismatch_errors(self, op, other_constructor, length):
array = IntervalArray.from_arrays(range(4), range(1, 5))
other = other_constructor([Interval(0, 1)] * length)
with pytest.raises(ValueError, match="Lengths must match to compare"):
op(array, other)
@pytest.mark.parametrize(
"constructor, expected_type, assert_func",
[
(IntervalIndex, np.array, tm.assert_numpy_array_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_index_series_compat(self, op, constructor, expected_type, assert_func):
# IntervalIndex/Series that rely on IntervalArray for comparisons
breaks = range(4)
index = constructor(IntervalIndex.from_breaks(breaks))
# scalar comparisons
other = index[0]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
other = breaks[0]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
# list-like comparisons
other = IntervalArray.from_breaks(breaks)
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
other = [index[0], breaks[0], "foo"]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
@pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
def test_comparison_operations(self, scalars):
# GH #28981
expected = Series([False, False])
s = pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
result = s == scalars
tm.assert_series_equal(result, expected)
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/covariance/tests/test_robust_covariance.py | 9 | 3845 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises, assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'fast_mcd expects at least 2 samples',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'MinCovDet expects at least 2 samples',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| mit |
jts/nanopolish | scripts/polya_training/hmmplot.py | 1 | 4743 | #! /usr/bin/env python3
"""
Plot a random segmentation from a dataset.
Usage:
$ python hmmplot.py polya.out.tsv reads.fastq.readdb.index
"""
import h5py
import pandas as pd
import numpy as np
import argparse
import os
from random import choice
from collections import OrderedDict
# plotting libraries:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
def load_fast5_signal(read_path):
"""Load a fast5 signal from read path; return as numpy array."""
read_h5 = h5py.File(read_path, 'r')
# get scaling parameters:
offset = read_h5['UniqueGlobalKey']['channel_id'].attrs['offset']
digitisation = read_h5['UniqueGlobalKey']['channel_id'].attrs['digitisation']
read_range = read_h5['UniqueGlobalKey']['channel_id'].attrs['range']
# get raw integer-encoded signal:
rn = list(read_h5['Raw']['Reads'].keys())[0]
signal = (read_range / digitisation) * (np.array(read_h5['Raw']['Reads'][rn]['Signal']) + offset)
# close hdf object and return numpy signal:
read_h5.close()
return signal
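# Illustrative usage sketch (the path is hypothetical): the returned array is
# the raw current trace in picoamperes, plotted later in main() with the
# sample index running 3' to 5'.
# signal = load_fast5_signal('/data/reads/read_0001.fast5')
# print(signal.shape, signal.mean())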
def get_state_names(header):
"""Return a list of state-start columns in the header. E.g., `[leader_start, adapter_start, ..., transcript_start]`."""
return list(filter(lambda name: (name[-6:] == '_start'), header))
def generate_color_palette(num_colors):
"""Generate a list (of length `num_colors`) of color IDs for matplotlib."""
# TODO(this is a hack-ish solution. Generate it mathematically!!)
colors = ['cyan','yellow','red','green','blue', 'orange', 'green']
return colors[:num_colors]
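# One possible way to address the TODO above (an illustrative sketch only, not
# the project's chosen fix): sample the palette from a matplotlib colormap so
# any number of states gets a distinct color.
# def generate_color_palette(num_colors):
#     cmap = plt.cm.get_cmap('tab20')
#     return [cmap(i / max(num_colors, 1)) for i in range(num_colors)]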
def main(args):
"""Filter-in PASS-ing segmentations and plot a random segmented read to file."""
# load dataframes:
polya = pd.read_csv(args.polya_tsv, sep='\t')
readdb = pd.read_csv(args.readdb, sep='\t', header=None, names=['readname','location'])
# get the names of all state-index columns:
state_starts = get_state_names(polya.columns.values.tolist())
# get a random read, its segmentation, and its location:
if (args.read is None):
row_values = choice(polya[polya['qc_tag'] == 'PASS'][['readname', *state_starts]].values).tolist()
read_id = row_values.pop(0)
state_start_indices = OrderedDict()
for k in range(len(state_starts)):
state_start_indices[state_starts[k]] = row_values[k]
read_path = readdb[readdb['readname'] == read_id].values[0][1]
else:
try:
read_df = polya[polya['readname'] == args.read]
row_values = choice(read_df[read_df['qc_tag'] == 'PASS'][['readname', *state_starts]].values).tolist()
read_id = row_values.pop(0)
state_start_indices = OrderedDict()
for k in range(len(state_starts)):
state_start_indices[state_starts[k]] = row_values[k]
read_path = readdb[readdb['readname'] == read_id].values[0][1]
except:
raise Exception("[hmmplot.py] read id={} could not be resolved".format(args.read))
# load fast5 file:
signal = load_fast5_signal(read_path)
# create dictionary of start-stop indices for each region:
start_stop_indices = {}
stop_idxs = [state_start_indices[name] for name in state_starts[1:]] + [signal.shape[0]]
colors = generate_color_palette(len(state_start_indices))
for n, (name, start_idx) in enumerate(state_start_indices.items()):
start_stop_indices[name] = ( start_idx, stop_idxs[n], colors[n] )
# make segmentation plot:
plt.figure(figsize=(18,6))
plt.plot(signal)
for k, v in start_stop_indices.items():
plt.axvspan(v[0], v[1], color=v[2], alpha=0.35, label=k[:-6])
plt.legend(loc='best')
plt.xlim(0, signal.shape[0])
plt.title("Segmentation: {}".format(read_id))
plt.xlabel("Sample Index (3' to 5')")
plt.ylabel("Current (pA)")
if (args.out is None):
plt.savefig("segmentation.{}.png".format(read_id))
else:
plt.savefig(args.out)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot a random passing segmentation from a polya output file.")
parser.add_argument("polya_tsv", help="Output TSV of `nanopolish polya {...}`")
parser.add_argument("readdb", help="ReadDB index file from `nanopolish index {...}`")
parser.add_argument("--out", default=None, help="Where to put the output file. [./segmentation.<READ_ID>.png]")
parser.add_argument("--read", default=None, help="Visualize a specific read. [random read]")
args = parser.parse_args()
assert(os.path.exists(args.polya_tsv)), "[ERR] {} does not exist".format(args.polya_tsv)
assert(os.path.exists(args.readdb)), "[ERR] {} does not exist".format(args.readdb)
main(args)
| mit |
ssaeger/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 84 | 1221 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
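# Note: the loop above could equivalently be replaced by a single vectorized
# call (a sketch, not part of the original example):
#   Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)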
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as scikit-learn's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
dikien/Machine-Learning-Newspaper | nytimes/step4_analysis_unsupervised_3.py | 1 | 3101 | # -*- coding: UTF-8 -*-
from sklearn.metrics import accuracy_score
from time import time
import numpy as np
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import preprocessing
from sklearn.feature_selection import SelectPercentile, f_classif, chi2
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.metrics import accuracy_score
def plot(data):
colors = cycle('rgbcmykw')
algorithm = sorted(data)
fig = plt.figure()
ax = fig.add_subplot(111)
    for i, (c, score) in enumerate(zip(colors, data)):
        ax.plot([0], [score], label='run {}'.format(i), color=c)
        ax.scatter([0], [score], color=c)
plt.xlabel("#-Features(SelectPercentile)")
plt.ylabel("Accuracy")
plt.title("Accuracy vs #-Features for different classifiers")
# ax.set_xscale("log")
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3,
box.width, box.height * 0.7])
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
plt.legend(loc=2)
plt.show()
def preprocess(article_file, lable_file):
features = pickle.load(open(article_file))
features = np.array(features)
# transform non-numerical labels (as long as they are hashable and comparable) to numerical labels
lables = pickle.load(open(lable_file))
le = preprocessing.LabelEncoder()
le.fit(lables)
lables = le.transform(lables)
# print le.inverse_transform([0])
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, min_df=1,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features)
# selector : chi2
selector = SelectPercentile(score_func=chi2)
selector.fit(features_train_transformed, lables)
features_train_transformed = selector.transform(features_train_transformed).toarray()
return features_train_transformed, lables, vectorizer, selector, le, features
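# Note: SelectPercentile keeps only the top-scoring 10% of features by default
# (percentile=10); e.g. SelectPercentile(score_func=chi2, percentile=20) would
# retain more terms.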
data = []
features, labels, vectorizer, selector, le, features_data = preprocess("pkl/article_2_people.pkl", "pkl/lable_2_people.pkl")
for name, clf in [
# ('k_means', KMeans(n_clusters=2, n_init=5)),
# ('SpectralClustering', SpectralClustering(n_clusters=2, n_init=5)),
# ('AgglomerativeClustering_ward', AgglomerativeClustering(n_clusters=2, linkage='ward')),
# ('AgglomerativeClustering_complete', AgglomerativeClustering(n_clusters=2, linkage='complete')),
('AgglomerativeClustering_average', AgglomerativeClustering(n_clusters=2, linkage='average'))
]:
print "*" * 100
print('Method: {}'.format(name))
# Fit on the whole data:
t0 = time()
y_pred = clf.fit(features).labels_
print "fit time:", round(time()-t0, 3), "s"
score_accuracy = accuracy_score(y_pred, labels, normalize=True)
print('accuracy score on training: {}'.format(score_accuracy))
print "*"* 100
data.append(score_accuracy)
print data
plot(data) | bsd-3-clause |
liangz0707/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
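# Each 8x8 image is flattened into a 64-element vector, giving a
# (n_samples, 64) feature matrix.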
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/python/estimator/inputs/inputs.py | 20 | 1086 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
# pylint: enable=unused-import,line-too-long
| apache-2.0 |
UNR-AERIAL/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
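# data is now a (n_patches, 49) matrix: each 7x7 patch flattened, then
# centred and scaled per feature across patches.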
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/backends/qt_editor/formlayout.py | 11 | 19911 | # -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
# History:
# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
# 1.0.7: added support for "Apply" button
# 1.0.6: code cleaning
__version__ = '1.0.10'
__license__ = __doc__
DEBUG = False
import sys
STDERR = sys.stderr
from matplotlib.colors import is_color_like
from matplotlib.colors import rgb2hex
from matplotlib.colors import colorConverter
from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore
if not hasattr(QtWidgets, 'QFormLayout'):
raise ImportError("Warning: formlayout requires PyQt4 >v4.3 or PySide")
import datetime
def col2hex(color):
"""Convert matplotlib color to hex before passing to Qt"""
return rgb2hex(colorConverter.to_rgb(color))
class ColorButton(QtWidgets.QPushButton):
"""
Color choosing push button
"""
colorChanged = QtCore.Signal(QtGui.QColor)
def __init__(self, parent=None):
QtWidgets.QPushButton.__init__(self, parent)
self.setFixedSize(20, 20)
self.setIconSize(QtCore.QSize(12, 12))
self.clicked.connect(self.choose_color)
self._color = QtGui.QColor()
def choose_color(self):
color = QtWidgets.QColorDialog.getColor(self._color, self.parentWidget(), '')
if color.isValid():
self.set_color(color)
def get_color(self):
return self._color
@QtCore.Slot(QtGui.QColor)
def set_color(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit(self._color)
pixmap = QtGui.QPixmap(self.iconSize())
pixmap.fill(color)
self.setIcon(QtGui.QIcon(pixmap))
color = QtCore.Property(QtGui.QColor, get_color, set_color)
def col2hex(color):
"""Convert matplotlib color to hex before passing to Qt"""
return rgb2hex(colorConverter.to_rgb(color))
def to_qcolor(color):
"""Create a QColor from a matplotlib color"""
qcolor = QtGui.QColor()
color = str(color)
try:
color = col2hex(color)
except ValueError:
#print('WARNING: ignoring invalid color %r' % color)
return qcolor # return invalid QColor
qcolor.setNamedColor(color) # set using hex color
return qcolor # return valid QColor
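# Example: to_qcolor('red') or to_qcolor('#ff0000') return a valid QColor,
# while an unrecognized color string returns a default-constructed (invalid)
# QColor, i.e. isValid() is False.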
class ColorLayout(QtWidgets.QHBoxLayout):
"""Color-specialized QLineEdit layout"""
def __init__(self, color, parent=None):
QtWidgets.QHBoxLayout.__init__(self)
assert isinstance(color, QtGui.QColor)
self.lineedit = QtWidgets.QLineEdit(color.name(), parent)
self.lineedit.editingFinished.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
self.colorbtn.color = color
self.colorbtn.colorChanged.connect(self.update_text)
self.addWidget(self.colorbtn)
def update_color(self):
color = self.text()
qcolor = to_qcolor(color)
self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid()
def update_text(self, color):
self.lineedit.setText(color.name())
def text(self):
return self.lineedit.text()
def font_is_installed(font):
"""Check if font is installed"""
return [fam for fam in QtGui.QFontDatabase().families()
if six.text_type(fam) == font]
def tuple_to_qfont(tup):
"""
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
"""
if not isinstance(tup, tuple) or len(tup) != 4 \
or not font_is_installed(tup[0]) \
or not isinstance(tup[1], int) \
or not isinstance(tup[2], bool) \
or not isinstance(tup[3], bool):
return None
font = QtGui.QFont()
family, size, italic, bold = tup
font.setFamily(family)
font.setPointSize(size)
font.setItalic(italic)
font.setBold(bold)
return font
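# Example (assuming the 'Arial' family is installed on the system):
#   tuple_to_qfont(('Arial', 10, False, True)) -> 10pt bold, non-italic QFont
#   tuple_to_qfont(('Arial', 10)) -> None (wrong tuple length)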
def qfont_to_tuple(font):
return (six.text_type(font.family()), int(font.pointSize()),
font.italic(), font.bold())
class FontLayout(QtWidgets.QGridLayout):
"""Font selection"""
def __init__(self, value, parent=None):
QtWidgets.QGridLayout.__init__(self)
font = tuple_to_qfont(value)
assert font is not None
# Font family
self.family = QtWidgets.QFontComboBox(parent)
self.family.setCurrentFont(font)
self.addWidget(self.family, 0, 0, 1, -1)
# Font size
self.size = QtWidgets.QComboBox(parent)
self.size.setEditable(True)
sizelist = list(xrange(6, 12)) + list(xrange(12, 30, 2)) + [36, 48, 72]
size = font.pointSize()
if size not in sizelist:
sizelist.append(size)
sizelist.sort()
self.size.addItems([str(s) for s in sizelist])
self.size.setCurrentIndex(sizelist.index(size))
self.addWidget(self.size, 1, 0)
# Italic or not
self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent)
self.italic.setChecked(font.italic())
self.addWidget(self.italic, 1, 1)
# Bold or not
self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent)
self.bold.setChecked(font.bold())
self.addWidget(self.bold, 1, 2)
def get_font(self):
font = self.family.currentFont()
font.setItalic(self.italic.isChecked())
font.setBold(self.bold.isChecked())
font.setPointSize(int(self.size.currentText()))
return qfont_to_tuple(font)
def is_edit_valid(edit):
text = edit.text()
state = edit.validator().validate(text, 0)[0]
return state == QtGui.QDoubleValidator.Acceptable
class FormWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, data, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
from copy import deepcopy
self.data = deepcopy(data)
self.widgets = []
self.formlayout = QtWidgets.QFormLayout(self)
if comment:
self.formlayout.addRow(QtWidgets.QLabel(comment))
self.formlayout.addRow(QtWidgets.QLabel(" "))
if DEBUG:
print("\n"+("*"*80))
print("DATA:", self.data)
print("*"*80)
print("COMMENT:", comment)
print("*"*80)
def get_dialog(self):
"""Return FormDialog instance"""
dialog = self.parent()
while not isinstance(dialog, QtWidgets.QDialog):
dialog = dialog.parent()
return dialog
def setup(self):
for label, value in self.data:
if DEBUG:
print("value:", value)
if label is None and value is None:
# Separator: (None, None)
self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" "))
self.widgets.append(None)
continue
elif label is None:
# Comment
self.formlayout.addRow(QtWidgets.QLabel(value))
self.widgets.append(None)
continue
elif tuple_to_qfont(value) is not None:
field = FontLayout(value, self)
elif is_color_like(value):
field = ColorLayout(to_qcolor(value), self)
elif isinstance(value, six.string_types):
field = QtWidgets.QLineEdit(value, self)
elif isinstance(value, (list, tuple)):
if isinstance(value, tuple):
value = list(value)
selindex = value.pop(0)
field = QtWidgets.QComboBox(self)
if isinstance(value[0], (list, tuple)):
keys = [key for key, _val in value]
value = [val for _key, val in value]
else:
keys = value
field.addItems(value)
if selindex in value:
selindex = value.index(selindex)
elif selindex in keys:
selindex = keys.index(selindex)
elif not isinstance(selindex, int):
print("Warning: '%s' index is invalid (label: "
"%s, value: %s)" % (selindex, label, value), file=STDERR)
selindex = 0
field.setCurrentIndex(selindex)
elif isinstance(value, bool):
field = QtWidgets.QCheckBox(self)
if value:
field.setCheckState(QtCore.Qt.Checked)
else:
field.setCheckState(QtCore.Qt.Unchecked)
elif isinstance(value, float):
field = QtWidgets.QLineEdit(repr(value), self)
field.setValidator(QtGui.QDoubleValidator(field))
dialog = self.get_dialog()
dialog.register_float_field(field)
field.textChanged.connect(lambda text: dialog.update_buttons())
elif isinstance(value, int):
field = QtWidgets.QSpinBox(self)
field.setRange(-1e9, 1e9)
field.setValue(value)
elif isinstance(value, datetime.datetime):
field = QtWidgets.QDateTimeEdit(self)
field.setDateTime(value)
elif isinstance(value, datetime.date):
field = QtWidgets.QDateEdit(self)
field.setDate(value)
else:
field = QtWidgets.QLineEdit(repr(value), self)
self.formlayout.addRow(label, field)
self.widgets.append(field)
def get(self):
valuelist = []
for index, (label, value) in enumerate(self.data):
field = self.widgets[index]
if label is None:
# Separator / Comment
continue
elif tuple_to_qfont(value) is not None:
value = field.get_font()
elif isinstance(value, six.string_types) or is_color_like(value):
value = six.text_type(field.text())
elif isinstance(value, (list, tuple)):
index = int(field.currentIndex())
if isinstance(value[0], (list, tuple)):
value = value[index][0]
else:
value = value[index]
elif isinstance(value, bool):
value = field.checkState() == QtCore.Qt.Checked
elif isinstance(value, float):
value = float(str(field.text()))
elif isinstance(value, int):
value = int(field.value())
elif isinstance(value, datetime.datetime):
value = field.dateTime().toPyDateTime()
elif isinstance(value, datetime.date):
value = field.date().toPyDate()
else:
value = eval(str(field.text()))
valuelist.append(value)
return valuelist
class FormComboWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.combobox = QtWidgets.QComboBox()
layout.addWidget(self.combobox)
self.stackwidget = QtWidgets.QStackedWidget(self)
layout.addWidget(self.stackwidget)
self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)
self.widgetlist = []
for data, title, comment in datalist:
self.combobox.addItem(title)
widget = FormWidget(data, comment=comment, parent=self)
self.stackwidget.addWidget(widget)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormTabWidget(QtWidgets.QWidget):
update_buttons = QtCore.Signal()
def __init__(self, datalist, comment="", parent=None):
QtWidgets.QWidget.__init__(self, parent)
layout = QtWidgets.QVBoxLayout()
self.tabwidget = QtWidgets.QTabWidget()
layout.addWidget(self.tabwidget)
self.setLayout(layout)
self.widgetlist = []
for data, title, comment in datalist:
if len(data[0]) == 3:
widget = FormComboWidget(data, comment=comment, parent=self)
else:
widget = FormWidget(data, comment=comment, parent=self)
index = self.tabwidget.addTab(widget, title)
self.tabwidget.setTabToolTip(index, comment)
self.widgetlist.append(widget)
def setup(self):
for widget in self.widgetlist:
widget.setup()
def get(self):
return [widget.get() for widget in self.widgetlist]
class FormDialog(QtWidgets.QDialog):
"""Form Dialog"""
def __init__(self, data, title="", comment="",
icon=None, parent=None, apply=None):
QtWidgets.QDialog.__init__(self, parent)
self.apply_callback = apply
# Form
if isinstance(data[0][0], (list, tuple)):
self.formwidget = FormTabWidget(data, comment=comment,
parent=self)
elif len(data[0]) == 3:
self.formwidget = FormComboWidget(data, comment=comment,
parent=self)
else:
self.formwidget = FormWidget(data, comment=comment,
parent=self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.formwidget)
self.float_fields = []
self.formwidget.setup()
# Button box
self.bbox = bbox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok
| QtWidgets.QDialogButtonBox.Cancel)
self.formwidget.update_buttons.connect(self.update_buttons)
if self.apply_callback is not None:
apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)
apply_btn.clicked.connect(self.apply)
bbox.accepted.connect(self.accept)
bbox.rejected.connect(self.reject)
layout.addWidget(bbox)
self.setLayout(layout)
self.setWindowTitle(title)
if not isinstance(icon, QtGui.QIcon):
icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)
self.setWindowIcon(icon)
def register_float_field(self, field):
self.float_fields.append(field)
def update_buttons(self):
valid = True
for field in self.float_fields:
if not is_edit_valid(field):
valid = False
for btn_type in (QtWidgets.QDialogButtonBox.Ok, QtWidgets.QDialogButtonBox.Apply):
btn = self.bbox.button(btn_type)
if btn is not None:
btn.setEnabled(valid)
def accept(self):
self.data = self.formwidget.get()
QtWidgets.QDialog.accept(self)
def reject(self):
self.data = None
QtWidgets.QDialog.reject(self)
def apply(self):
self.apply_callback(self.formwidget.get())
def get(self):
"""Return form result"""
return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
"""
Create form dialog and return result
(if Cancel button is pressed, return None)
data: datalist, datagroup
title: string
comment: string
icon: QIcon instance
parent: parent QWidget
apply: apply callback (function)
datalist: list/tuple of (field_name, field_value)
datagroup: list/tuple of (datalist *or* datagroup, title, comment)
-> one field for each member of a datalist
-> one tab for each member of a top-level datagroup
-> one page (of a multipage widget, each page can be selected with a combo
box) for each member of a datagroup inside a datagroup
Supported types for field_value:
- int, float, str, unicode, bool
- colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
(automatically detected from a string)
- list/tuple:
* the first element will be the selected index (or value)
* the other elements can be couples (key, value) or only values
"""
# Create a QApplication instance if no instance currently exists
# (e.g., if the module is used directly from the interpreter)
if QtWidgets.QApplication.startingUp():
_app = QtWidgets.QApplication([])
dialog = FormDialog(data, title, comment, icon, parent, apply)
if dialog.exec_():
return dialog.get()
if __name__ == "__main__":
def create_datalist_example():
return [('str', 'this is a string'),
('list', [0, '1', '3', '4']),
('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
('-.', 'DashDot'), ('-', 'Solid'),
('steps', 'Steps'), (':', 'Dotted')]),
('float', 1.2),
(None, 'Other:'),
('int', 12),
('font', ('Arial', 10, False, True)),
('color', '#123409'),
('bool', True),
('date', datetime.date(2010, 10, 10)),
('datetime', datetime.datetime(2010, 10, 10)),
]
def create_datagroup_example():
datalist = create_datalist_example()
return ((datalist, "Category 1", "Category 1 comment"),
(datalist, "Category 2", "Category 2 comment"),
(datalist, "Category 3", "Category 3 comment"))
#--------- datalist example
datalist = create_datalist_example()
def apply_test(data):
print("data:", data)
print("result:", fedit(datalist, title="Example",
comment="This is just an <b>example</b>.",
apply=apply_test))
#--------- datagroup example
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title"))
#--------- datagroup inside a datagroup example
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
(datalist, "Title 2", "Tab 2 comment"),
(datalist, "Title 3", "Tab 3 comment")),
"Global title"))
| mit |
xingnix/learning | imageprocessing/python/6/colorimage.py | 1 | 6608 | import matplotlib.pyplot as plt
import numpy as np
from skimage import io,color,data,filters,exposure,util
#plt.switch_backend('qt5agg')
def pseudocolor():
image=io.imread('timg.jpeg')
imagergb=image.copy()
imagergb[:,:,0]=np.interp(image[:,:,0],[0,50,100,150,200,255],[0,0,0,55,155,255])
imagergb[:,:,1]=np.interp(image[:,:,0],[0,50,100,150,200,255],[0,155,255,155,0,0])
imagergb[:,:,2]=np.interp(image[:,:,0],[0,50,100,150,200,255],[255,155,55,0,0,0])
io.imsave('timgcolor.png',imagergb)
#plt.plot(range(256),lutr,'r',np.array(range(256)),lutg,'g',range(256),lutb,'b')
lutr=np.uint8(np.interp(range(256),[0,50,100,150,200,255],[0,0,0,55,155,255]))
lutg=np.uint8(np.interp(range(256),[0,50,100,150,200,255],[0,155,255,155,0,0]))
lutb=np.uint8(np.interp(range(256),[0,50,100,150,200,255],[255,155,55,0,0,0]))
	# shuffle the gray-scale look-up table; the pseudocolor RGB result is unchanged because the color look-up tables are permuted consistently
lutgray=np.array(range(256))
lutshuffle=lutgray.copy()
np.random.shuffle(lutshuffle)
shuffledlutr=lutr.copy()
shuffledlutr[lutshuffle]=lutr
shuffledlutg=lutg.copy()
shuffledlutg[lutshuffle]=lutg
shuffledlutb=lutb.copy()
shuffledlutb[lutshuffle]=lutb
shuffled=np.uint8(np.interp(image[:,:,0],lutgray,lutshuffle))
io.imsave('shuffled.png',shuffled)
shuffledrgb=image.copy()
shuffledrgb[:,:,0]=np.interp(shuffled,lutgray,shuffledlutr)
shuffledrgb[:,:,1]=np.interp(shuffled,lutgray,shuffledlutg)
shuffledrgb[:,:,2]=np.interp(shuffled,lutgray,shuffledlutb)
def color_transformation():
	# color transformation
image=data.coffee()
brighter=np.uint8(image*0.5+255*0.5)
darker=np.uint8(image*0.5)
io.imshow(brighter)
io.show()
io.imshow(darker)
io.show()
def color_complements():
	# color complements
image=data.coffee()
invert=255-image
io.imshow(invert)
io.show()
def color_slicing():
	# color slicing
image=io.imread('trafficlight.png')
segred=image.copy()
seggreen=image.copy()
segyellow=image.copy()
maskred=(image[:,:,0]>100) & (image[:,:,1]<50 ) & (image[:,:,2]<50)
maskgreen=(image[:,:,0]<100) & (image[:,:,1]>100 ) & (image[:,:,2]<100)
maskyellow=(image[:,:,0]>100) & (image[:,:,1]>100 ) & (image[:,:,2]<70)
segred[:,:,0]*=maskred
segred[:,:,1]*=maskred
segred[:,:,2]*=maskred
io.imshow(segred)
io.imsave('lightred.png',segred)
io.show()
seggreen[:,:,0]*=maskgreen
seggreen[:,:,1]*=maskgreen
seggreen[:,:,2]*=maskgreen
io.imshow(seggreen)
io.imsave('lightgreen.png',seggreen)
io.show()
segyellow[:,:,0]*=maskyellow
segyellow[:,:,1]*=maskyellow
segyellow[:,:,2]*=maskyellow
io.imshow(segyellow)
io.imsave('lightyellow.png',segyellow)
#io.show()
def ton_and_color_corrections():
	# tone and color corrections
image=data.astronaut()
h1=color.rgb2hsv(image)
h2=h1.copy()
h1[:,:,1]=h1[:,:,1]*0.5
image1=color.hsv2rgb(h1)
h2[:,:,1]=h2[:,:,1]*0.5+0.5
image2=color.hsv2rgb(h2)
io.imshow(image)
io.imsave('astronaut.png',image)
io.imshow(image1)
io.imsave('astronautlight.png',image1)
io.imshow(image2)
io.imsave('astronautdark.png',image2)
imagered=image.copy()
imagered[:,:,0]=image[:,:,0]*127.0/255+128
io.imsave('astronautred.png',imagered)
imageblue=image.copy()
imageblue[:,:,2]=image[:,:,2]*127.0/255+128
io.imsave('astronautblue.png',imageblue)
imageyellow=image.copy()
imageyellow[:,:,0]=image[:,:,0]*127.0/255+128
imageyellow[:,:,1]=image[:,:,1]*127.0/255+128
io.imsave('astronautyellow.png',imageyellow)
io.imshow(imageyellow)
def hsi_equalize_hist():
image=data.astronaut()
h=color.rgb2hsv(image)
h[:,:,2]=exposure.equalize_hist(h[:,:,2])
image_equal=color.hsv2rgb(h)
io.imshow(image_equal)
io.imsave('astronautequal.png',image_equal)
def smoothe_and_sharp():
image=data.astronaut()
g3=filters.gaussian(image,3)
io.imsave('astronautgaussian3.png',g3)
g9=filters.gaussian(image,9)
io.imsave('astronautgaussian9.png',g9)
g15=filters.gaussian(image,15)
io.imsave('astronautgaussian15.png',g15)
image=io.imread('astronautgaussian3.png')
sharprgb=image.copy()
for i in range(3):
l=np.abs(filters.laplace(image[:,:,i]))
sharprgb[:,:,i]=np.uint8(np.minimum(image[:,:,i]+l/l.max()*55,255))
io.imsave('astronautsharprgb.png',sharprgb)
sharphsv=color.rgb2hsv(image)
l=np.abs(filters.laplace(sharphsv[:,:,2]))
sharphsv[:,:,2]=np.minimum(l/l.max()*0.5+sharphsv[:,:,2],1)
io.imsave('astronautsharphsv.png',color.hsv2rgb(sharphsv))
def color_segment():
image=data.astronaut()
r=np.uint8((image[:,:,0]>100 ) & (image[:,:,1]<100) & (image[:,:,2]<100))
io.imsave('astronautsegr.png',r*255)
g=np.uint8((image[:,:,0]<100 ) & (image[:,:,1]>100) & (image[:,:,2]<100))
io.imsave('astronautsegg.png',g*255)
b=np.uint8((image[:,:,0]<100 ) & (image[:,:,1]<100) & (image[:,:,2]>100))
io.imsave('astronautsegb.png',b*255)
def color_edge():
image=data.astronaut()
r=np.abs(filters.sobel(image[:,:,0]))
r=np.uint8(r/r.max()*255)
io.imsave('astronautedger.png',r)
g=np.abs(filters.sobel(image[:,:,1]))
g=np.uint8(g/g.max()*255)
io.imsave('astronautedgeg.png',g)
b=np.abs(filters.sobel(image[:,:,2]))
b=np.uint8(b/b.max()*255)
io.imsave('astronautedgeb.png',b)
def color_noise():
image=data.astronaut()
noiseg=util.noise.random_noise(image,'gaussian')
io.imsave('astronautnoiseg.png',noiseg)
io.imsave('astronautnoisegr.png',noiseg[:,:,0])
io.imsave('astronautnoisegg.png',noiseg[:,:,1])
io.imsave('astronautnoisegb.png',noiseg[:,:,2])
noiseghsv=color.rgb2hsv(noiseg)
io.imsave('astronautnoisegh.png',noiseghsv[:,:,0])
io.imsave('astronautnoisegs.png',noiseghsv[:,:,1])
io.imsave('astronautnoisegv.png',noiseghsv[:,:,2])
noisei=util.noise.random_noise(image,'s&p')
io.imsave('astronautnoisei.png',noisei)
io.imsave('astronautnoiseir.png',noisei[:,:,0])
io.imsave('astronautnoiseig.png',noisei[:,:,1])
io.imsave('astronautnoiseib.png',noisei[:,:,2])
noiseihsv=color.rgb2hsv(noisei)
io.imsave('astronautnoiseih.png',noiseihsv[:,:,0])
io.imsave('astronautnoiseis.png',noiseihsv[:,:,1])
io.imsave('astronautnoiseiv.png',noiseihsv[:,:,2])
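# A minimal, hypothetical driver for the routines above: the three calls below
# rely only on skimage's bundled images (coffee/astronaut), whereas pseudocolor()
# and color_slicing() additionally expect 'timg.jpeg' / 'trafficlight.png' to
# exist in the working directory.
if __name__ == '__main__':
    color_transformation()
    color_complements()
    hsi_equalize_hist()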
| gpl-3.0 |
ua-snap/downscale | snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/ar5_model_data_downscaling_class.py | 3 | 30806 | # # # # #
# Tool to downscale the CMIP5 data from the PCMDI group.
# # # # #
import rasterio, xray, os
import numpy as np
import pandas as pd
import numpy as np
class DownscalingUtils( object ):
def write_gtiff( self, output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and an output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
See Rasterio documentation for more details.
RETURNS:
--------
string path to the new output_filename created
'''
import os
if 'transform' in template_meta.keys():
_ = template_meta.pop( 'transform' )
if not output_filename.endswith( '.tif' ):
UserWarning( 'output_filename does not end with ".tif", it has been fixed for you.' )
output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
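	# A usage sketch for write_gtiff (hypothetical filenames; 'arr' is any array
	# whose band count matches the template's 'count'):
	#   meta = rasterio.open( 'template.tif' ).meta
	#   DownscalingUtils().write_gtiff( arr, meta, 'output.tif', compress=True )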
def shiftgrid( self, lon0, datain, lonsin, start=True, cyclic=360.0 ):
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
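	# Example (mirrors the call in interpolate_anomalies below): shift a
	# pacific-centered 0..360 longitude grid to a -180..180 grid:
	#   dat_greenwich, lons_greenwich = self.shiftgrid( 180., dat_pcll, lons_pcll, start=False )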
def bounds_to_extent( self, bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( self, rst, npixels, crs ):
'''
		expand the bounds of a raster by a given number of pixels in each
		direction and return the new (left, bottom, right, top) bounds list
		rst: rasterio raster object
		npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
		expand in each direction. for 5 pixels in each direction it would look like
		this: (-5, -5, 5, 5) or just in the right and top directions like this:
		(0,0,5,5).
		crs: epsg code or proj4string defining the geospatial reference
		system
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
return new_bounds
def xyz_to_grid( self, x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'cubic', 'near', 'linear'
'''
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
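	# Example (sketch): interpolate scattered lon/lat/anomaly points to the
	# source grid, as done in interpolate_anomalies():
	#   xi, yi = np.meshgrid( lons, lats )
	#   zi = self.xyz_to_grid( x=lon_pts, y=lat_pts, z=anom_pts, grid=(xi, yi), method='cubic' )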
# make this a simple regrid command instead of interpolating the anomalies
def interpolate_anomalies( self, anom_df, meshgrid_tuple, template_raster_fn, lons_pcll, \
src_transform, src_crs, src_nodata, output_filename, write_anomalies, *args, **kwargs ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
ARGUMENTS:
---------
		anom_df = [pandas.DataFrame] anomalies table with 'lon', 'lat' and 'anom' columns
		meshgrid_tuple = [tuple] (xi, yi) 2-D arrays built with numpy.meshgrid from the
			source grid longitudes / latitudes
		template_raster_fn = [str] path to the Alaska / Canada template raster to match
		lons_pcll = [numpy.ndarray] pacific-centered (0-360) longitudes of the source grid
		src_transform = [affine.Affine] geotransform of the source (global) grid
		src_crs = [dict] rasterio-style crs of the source grid
		src_nodata = [float] nodata value of the source grid
		output_filename = [str] path to the anomalies GeoTiff to create (used when
			write_anomalies is True)
		write_anomalies = [bool] whether to write the interpolated anomalies to disk
RETURNS:
-------
if write_anomalies == True: [str] path to the output filename generated
if write_anomalies == False: [tuple] interpolated NumPy ndarray representing the
interpolated anomalies and the rasterio-style metadata dictionary describing
the newly generated raster.
'''
from rasterio.warp import reproject, RESAMPLING
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
# update some meta configs
template_meta.update( compress='lzw', crs={'init':'epsg:3338'} )
interp_arr = self.xyz_to_grid( np.array(anom_df['lon'].tolist()), \
np.array(anom_df['lat'].tolist()), \
np.array(anom_df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = self.shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.cubic_spline, SOURCE_EXTRA=1000 )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
output_arr.fill_value = template_meta[ 'nodata' ]
output_arr = output_arr.filled()
if write_anomalies == True:
out = self.write_gtiff( output_arr, template_meta, output_filename, compress=True )
elif write_anomalies == False:
out = ( output_arr, template_meta )
else:
AttributeError( 'interpolate_anomalies: write_anomalies can be True or False only.')
return out
def downscale( self, anom_arr, baseline_arr, output_filename, \
downscaling_operation, meta, post_downscale_function, *args, **kwargs ):
'''
downscale an anomaly array with a baseline array from the same period.
Arguments:
----------
anom_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain.
anom/baseline arrays must be same shape.
baseline_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain.
anom/baseline arrays must be same shape.
output_filename = [ str ] full path and output filename to be created
		downscaling_operation = [ str ] one of 'add' or 'mult'; the operation used to
			combine the baseline and anomaly arrays ('div' is currently a placeholder).
meta = [ dict ] rasterio-style dictionary of raster metadata attributes. This
must jive with the dimensions and the data type of the array generated
through downscaling anom_arr with baseline_arr.
post_downscale_function = [ function ] a function that takes a 2-D downscaled
array as input and returns an array of the same shape / datatype. This
is typically used as a post-mortem for clamping the values from an output
downscaled array that may be slightly outside the range due to the
interpolation method. We currently use this to clamp the values of the hur
to 0-100.
Returns:
--------
output_filename of newly generated downscaled raster.
'''
def add( base, anom ):
return base + anom
def mult( base, anom ):
return base * anom
def div( base, anom ):
# this one may not be useful, but the placeholder is here
# return base / anom
return NotImplementedError
try:
operation_switch = { 'add':add, 'mult':mult, 'div':div }
except:
AttributeError( 'downscale: incorrect downscaling_operation str' )
# [ CHECK ] This may be something better to be done before passing to this function
# both files need to be masked here since we use a RIDICULOUS oob value...
# for both tas and cld, values less than -200 are out of the range of acceptable values and it
# grabs the -3.4... mask values. so lets mask using this
baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
anom_arr = np.ma.masked_where( anom_arr < -200, anom_arr )
output_arr = operation_switch[ downscaling_operation ]( baseline_arr, anom_arr )
output_arr[ np.isinf( output_arr ) ] = meta[ 'nodata' ]
if post_downscale_function != None:
output_arr = post_downscale_function( output_arr )
if 'transform' in meta.keys():
# avoid the gdal geotransform deprecation warning
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
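	# Note: downscale_ar5_ts() chooses 'add' when self.absolute is True (absolute
	# anomalies, e.g. temperature) and 'mult' when it is False (relative anomalies,
	# e.g. cloud cover expressed as a ratio of the climatology).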
class DownscaleAR5( object ):
def __init__( self, ar5_modeled=None, ar5_historical=None, base_path=None, clim_path=None, climatology_begin='1961', climatology_end='1990', \
plev=None, absolute=True, metric='metric', variable=None, ncores=2, *args, **kwargs ):
'''
NEW METHODS FOR AR5 DOWNSCALING USING THE NEW
API-ECOSYSTEM.
'''
self.ar5_modeled = ar5_modeled
self.ar5_historical = ar5_historical
self.base_path = base_path
self.clim_path = clim_path
self.climatology_begin = climatology_begin
self.climatology_end = climatology_end
self.plev = plev
self.absolute = absolute
self.metric = metric
self.variable = variable
self.ncores = ncores
self.utils = DownscalingUtils()
@staticmethod
def standardized_fn_to_vars( fn ):
''' take a filename string following the convention for this downscaling and break into parts and return a dict'''
name_convention = [ 'variable', 'cmor_table', 'model', 'scenario', 'experiment', 'begin_time', 'end_time' ]
fn = os.path.basename( fn )
fn_list = fn.split( '.' )[0].split( '_' )
return { i:j for i,j in zip( name_convention, fn_list ) }
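	# Example: 'clt_Amon_IPSL-CM5A-LR_rcp26_r1i1p1_200601_210012.nc' (as used in
	# __main__ below) parses to {'variable':'clt', 'cmor_table':'Amon',
	# 'model':'IPSL-CM5A-LR', 'scenario':'rcp26', 'experiment':'r1i1p1',
	# 'begin_time':'200601', 'end_time':'210012'}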
def _calc_anomalies( self, *args, **kwargs ):
'''
calculate absolute or relative anomalies given a NetCDF file
of the Climatic Research Unit (CRU) Historical Time Series.
'''
import xray
# handle modeled vs. historical
if self.ar5_modeled != None and self.ar5_historical != None:
# parse the input name for some file metadata HARDWIRED!
output_naming_dict = DownscaleAR5.standardized_fn_to_vars( self.ar5_modeled )
variable = output_naming_dict[ 'variable' ]
# read in both modeled and historical
ds = xray.open_dataset( self.ar5_modeled )
ds = ds[ variable ]
clim_ds = xray.open_dataset( self.ar5_historical )
# climatology
clim_ds = clim_ds.loc[ {'time':slice(self.climatology_begin,self.climatology_end)} ]
climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
del clim_ds
elif self.ar5_historical is not None and self.ar5_modeled is None:
			output_naming_dict = DownscaleAR5.standardized_fn_to_vars( self.ar5_historical )
variable = output_naming_dict[ 'variable' ]
# read in historical
ds = xray.open_dataset( self.ar5_historical )
# climatology
climatology = ds.loc[ {'time':slice(self.climatology_begin,self.climatology_end)} ]
climatology = climatology[ variable ].groupby( 'time.month' ).mean( 'time' )
else:
NameError( 'ERROR: must have both ar5_modeled and ar5_historical, or just ar5_historical' )
if self.plev is not None:
plevel, = np.where( ds.plev == self.plev )
ds = ds[ :, plevel[0], ... ]
climatology = climatology[ :, plevel[0], ... ]
# anomalies
if self.absolute == True:
anomalies = ds.groupby( 'time.month' ) - climatology
elif self.absolute == False:
anomalies = ds.groupby( 'time.month' ) / climatology
else:
AttributeError( '_calc_anomalies (ar5): absolute can only be True or False' )
return anomalies
def _interp_downscale_wrapper( self, args_dict, *args, **kwargs ):
'''
interpolate anomalies and downscale to the baseline arr
'''
output_filename = args_dict[ 'output_filename' ]
args_dict.update( output_filename=output_filename.replace( 'downscaled', 'anom' ) )
anom = self.utils.interpolate_anomalies( **args_dict )
if isinstance( anom, basestring ):
rst = rasterio.open( anom )
meta = rst.meta
meta.update( compress='lzw' )
anom_arr = rst.read( 1 )
elif isinstance( anom, tuple ):
anom_arr, meta = anom
else:
AttributeError( '_interp_downscale_wrapper: passed wrong instance type' )
args_dict.update( output_filename=output_filename, anom_arr=anom_arr, meta=meta )
return self.utils.downscale( **args_dict )
def downscale_ar5_ts( self, *args, **kwargs ):
from pathos.mp_map import mp_map
# build output dirs
# template setup
# calc the anomalies
anomalies = self._calc_anomalies()
anomalies_pcll, lons_pcll = self.utils.shiftgrid( 0., anomalies, anomalies.lon.data ) # grabs lons from the xray ds
# mesh the lons and lats and unravel them to 1-D
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ]
# convert into pandas.DataFrame and drop all the NaNs -- land-only dataset
anom_df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in anomalies_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup -- HARDWIRED
# src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
# src_nodata = -9999.0
# [!] THE ABOVE ARE INCORRECT FOR THE MODELED DATA
# output_filenames setup
		dates = anomalies.time.to_pandas()
years = dates.apply( lambda x: x.year ).tolist()
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
# read in the pre-processed 12-month climatology
clim_list = sorted( glob.glob( os.path.join( self.clim_path, '*.tif' ) ) ) # this could catch you.
clim_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, clim_list ) }
# [!] THIS BELOW NEEDS RE-WORKING FOR THE AR5 DATA MODELED DATA
output_filenames = [ os.path.join( downscaled_path, '_'.join([ variable, self.metric, cru_ts_version, 'downscaled', month, str(year) ])+'.tif' )
for month, year in month_year ]
# set downscaling_operation based on self.absolute boolean
if self.absolute == True:
downscaling_operation = 'add'
elif self.absolute == False:
downscaling_operation = 'mult'
else:
AttributeError( 'downscaling operation: self.absolute must be boolean' )
args_list = [ { 'anom_df':anom_df,
'meshgrid_tuple':(xi, yi),
'template_raster_fn':template_raster_fn,
'lons_pcll':lons_pcll,
'src_transform':src_transform,
'src_crs':self.src_crs, \
'src_nodata':src_nodata,
'output_filename':out_fn,
'baseline_arr':clim_dict[ self._fn_month_grouper( out_fn ) ],
'downscaling_operation':downscaling_operation,
'post_downscale_function':self.post_downscale_function,
'write_anomalies':self.write_anomalies }
for anom_df, out_fn in zip( anom_df_list, output_filenames ) ]
# run anomalies interpolation and downscaling in a single go.
# ( anom_df, meshgrid_tuple, template_raster_fn, lons_pcll, src_transform, src_crs, src_nodata, output_filename, write_anomalies )
out = mp_map( lambda args: self._interp_downscale_wrapper( args_dict=args ), args_list, nproc=self.ncores )
return 'downscaling complete. files output at: %s' % self.base_path # assumes the constructor stores base_path as self.base_path
if __name__ == '__main__':
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# example of use of the new DownscaleCRU / DownscalingUtils classes
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
import os, rasterio, xray, glob
import pandas as pd
import numpy as np
# input args
ar5_modeled = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_rcp26_r1i1p1_200601_210012.nc'
ar5_historical = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_historical_r1i1p1_185001_200512.nc'
clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
base_path = '/atlas_scratch/malindgren/CMIP5'
# EXAMPLE RUN -- TESTING
down = DownscaleAR5( ar5_modeled, ar5_historical, base_path, clim_path, ncores=32) #, climatology_begin, climatology_end, plev, absolute, metric, ncores )
output = down.downscale_ar5_ts()
# # # OLD CODE BELOW (kept for reference only)
# if __name__ == '__main__':
# import pandas as pd
# import numpy as np
# import os, sys, re, xray, rasterio, glob, argparse
# from rasterio import Affine as A
# from rasterio.warp import reproject, RESAMPLING
# from pathos import multiprocessing as mp
# # parse the commandline arguments
# parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
# parser.add_argument( "-mi", "--modeled_fn", nargs='?', const=None, action='store', dest='modeled_fn', type=str, help="path to modeled input filename (NetCDF); default:None" )
# parser.add_argument( "-hi", "--historical_fn", nargs='?', const=None, action='store', dest='historical_fn', type=str, help="path to historical input filename (NetCDF); default:None" )
# parser.add_argument( "-o", "--output_path", action='store', dest='output_path', type=str, help="string path to the output folder containing the new downscaled outputs" )
# parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYYMM or YYYY of the beginning month and potentially (year) of the climatology period" )
# parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYYMM or YYYY of the ending month and potentially (year) of the climatology period" )
# parser.add_argument( "-plev", "--plev", nargs='?', const=None, action='store', dest='plev', type=int, help="integer value (in millibars) of the desired pressure level to extract, if there is one." )
# parser.add_argument( "-cru", "--cru_path", action='store', dest='cru_path', type=str, help="path to the directory storing the cru climatology data derived from CL2.0" )
# parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
# parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
# parser.add_argument( "-dso", "--downscale_operation", action='store', dest='downscale_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
# parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
# # parse args
# args = parser.parse_args()
# # unpack args
# modeled_fn = args.modeled_fn
# historical_fn = args.historical_fn
# output_path = args.output_path
# climatology_begin = args.climatology_begin
# climatology_end = args.climatology_end
# plev = args.plev
# cru_path = args.cru_path
# anomalies_calc_type = args.anomalies_calc_type
# metric = args.metric
# downscale_operation = args.downscale_operation
# ncores = args.ncores
# # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# # THIS APPEARS TO BE THE MAIN NOT THE DOWNSCALER.
# def downscale( src, dst, cru, src_crs, src_affine, dst_crs, dst_affine, output_filename, dst_meta, variable,\
# method='cubic_spline', operation='add', output_dtype='float32', **kwargs ):
# '''
# operation can be one of two keywords for the operation to perform the delta downscaling
# - keyword strings are one of: 'add'= addition, 'mult'=multiplication, or 'div'=division (not implemented)
# - method can be one of 'cubic_spline', 'nearest', 'bilinear' and must be input as a string.
# - output_dtype can be one of 'int32', 'float32'
# '''
# from rasterio.warp import reproject, RESAMPLING
# def add( cru, anom ):
# return cru + anom
# def mult( cru, anom ):
# return cru * anom
# def div( cru, anom ):
# # return cru / anom
# # this one may not be useful, but the placeholder is here
# return NotImplementedError
# # switch to deal with numeric output dtypes
# dtypes_switch = {'int32':np.int32, 'float32':np.float32}
# # switch to deal with different resampling types
# method_switch = { 'nearest':RESAMPLING.nearest, 'bilinear':RESAMPLING.bilinear, 'cubic_spline':RESAMPLING.cubic_spline }
# method = method_switch[ method ]
# # reproject src to dst
# out = np.zeros( dst.shape )
# reproject( src,
# out,
# src_transform=src_affine,
# src_crs=src_crs,
# dst_transform=dst_affine,
# dst_crs=dst_crs,
# resampling=method )
# # switch to deal with different downscaling operators
# operation_switch = { 'add':add, 'mult':mult, 'div':div }
# downscaled = operation_switch[ operation ]( cru, out )
# # reset any > 100 values to 95 if the variable is cld or hur
# if variable == 'clt' or variable == 'hur' or variable == 'cld':
# downscaled[ downscaled > 100.0 ] = 95.0
# # give the proper fill values to the oob regions
# downscaled.fill_value = dst_meta['nodata']
# downscaled = downscaled.filled()
# # this is a geotiff creator so lets pass in the lzw compression
# dst_meta.update( compress='lzw' )
# with rasterio.open( output_filename, 'w', **dst_meta ) as out:
# out.write( downscaled.astype( dtypes_switch[ output_dtype ] ), 1 )
# return output_filename
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# [NOTE]: hardwired raster metadata meeting the ALFRESCO Model's needs for
# perfectly aligned inputs this is used as template metadata that
# is used in output generation. template raster filename below:
# '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/
# TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# NO!
# meta_3338 = {'affine': A(2000.0, 0.0, -2173223.206087799,
# 0.0, -2000.0, 2548412.932644147),
# 'count': 1,
# 'crs': {'init':'epsg:3338'},
# 'driver': u'GTiff',
# 'dtype': 'float32',
# 'height': 1186,
# 'nodata': -3.4e+38,
# 'width': 3218,
# 'compress':'lzw'}
# # output template numpy array same dimensions as the template
# dst = np.empty( (1186, 3218) )
# # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# # condition to deal with reading in historical data if needed.
# if modeled_fn != None and historical_fn != None:
# # parse the input name for some file metadata
# output_naming_dict = standardized_fn_to_vars( modeled_fn )
# # this is to maintain cleanliness
# variable = output_naming_dict[ 'variable' ]
# # read in both modeled and historical
# ds = xray.open_dataset( modeled_fn )
# ds = ds[ variable ].load()
# clim_ds = xray.open_dataset( historical_fn )
# clim_ds = clim_ds[ variable ].load()
# # generate climatology / anomalies
# clim_ds = clim_ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
# climatology = clim_ds.groupby( 'time.month' ).mean( 'time' )
# # find the begin/end years of the prepped files
# dates = ds.time.to_pandas()
# years = dates.apply( lambda x: x.year )
# begin_time = years.min()
# end_time = years.max()
# del clim_ds
# elif historical_fn is not None and modeled_fn is None:
# # parse the input name for some file metadata
# output_naming_dict = standardized_fn_to_vars( historical_fn )
# # this is to maintain cleanliness
# variable = output_naming_dict[ 'variable' ]
# # read in historical
# ds = xray.open_dataset( historical_fn )
# ds = ds[ variable ].load()
# # generate climatology / anomalies
# climatology = ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
# climatology = climatology.groupby( 'time.month' ).mean( 'time' )
# # find the begin/end years of the prepped files
# dates = ds.time.to_pandas()
# years = dates.apply( lambda x: x.year )
# begin_time = years.min()
# end_time = years.max()
# else:
# NameError( 'ERROR: must have both modeled_fn and historical_fn, or just historical_fn' )
# standardize the output pathing
# [NOTE]: everything from here down continues the commented-out legacy script above and
# relies on names it defines (output_naming_dict, output_path, ds, climatology,
# anomalies_calc_type, meta_3338, downscale_operation, ncores, ...).
if output_naming_dict[ 'variable' ] == 'clt':
variable_out = 'cld'
else:
variable_out = output_naming_dict[ 'variable' ]
output_path = os.path.join( output_path, 'ar5', output_naming_dict['model'], variable_out, 'downscaled' )
if not os.path.exists( output_path ):
os.makedirs( output_path )
# # if there is a pressure level to extract, extract it
# if plev is not None:
# plevel, = np.where( ds.plev == plev )
# ds = ds[ :, plevel[0], ... ]
# climatology = climatology[ :, plevel[0], ... ]
# deal with different anomaly calculation types
if anomalies_calc_type == 'absolute':
anomalies = ds.groupby( 'time.month' ) - climatology
elif anomalies_calc_type == 'proportional':
anomalies = ds.groupby( 'time.month' ) / climatology
else:
NameError( 'anomalies_calc_type can only be one of "absolute" or "proportional"' )
# some setup of the output raster metadata
time_len, rows, cols = anomalies.shape
crs = 'epsg:4326'
affine = A( *[np.diff( ds.lon )[ 0 ], 0.0, -180.0, 0.0, -np.diff( ds.lat )[ 0 ], 90.0] )
count = time_len
resolution = ( np.diff( ds.lat )[ 0 ], np.diff( ds.lon )[ 0 ] )
# close the dataset and clean it up
ds = None
# shift the grid to Greenwich Centering
dat, lons = shiftgrid( 180., anomalies[:], anomalies.lon.data, start=False )
# metadata for input?
meta_4326 = {'affine':affine,
'height':rows,
'width':cols,
'crs':crs,
'driver':'GTiff',
'dtype':np.float32,
'count':time_len,
'compress':'lzw' }
# build some filenames for the outputs to be generated
# months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
years = [ str(year) for year in range( begin_time, end_time + 1, 1 ) ]
# combine the months and the years
combinations = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( output_path, '_'.join([variable_out, 'metric', output_naming_dict['model'], output_naming_dict['scenario'], output_naming_dict['experiment'], month, year]) + '.tif' ) for month, year in combinations ]
# load the baseline CRU CL2.0 data
# [NOTE]: THIS ASSUMES THEY ARE THE ONLY FILES IN THE DIRECTORY -- COULD BE A GOTCHA
cru_files = glob.glob( os.path.join( cru_path, '*.tif' ) )
cru_files.sort()
cru_stack = [ rasterio.open( fn ).read( 1 ) for fn in cru_files ]
# this is a hack to make a masked array with the cru data
cru_stack = [ np.ma.masked_where( cru == cru.min(), cru ) for cru in cru_stack ]
import itertools
cru_gen = clim_generator( len(output_filenames), cru_stack )
# cleanup some uneeded vars that are hogging RAM
del climatology, anomalies
# run in parallel using PATHOS
pool = mp.Pool( processes=ncores )
args_list = [{ 'src':src,
'output_filename':fn,
'dst':dst,
'cru':cru,
'src_crs':meta_4326[ 'crs' ],
'src_affine':meta_4326[ 'affine' ],
'dst_crs':meta_3338[ 'crs' ],
'dst_affine':meta_3338[ 'affine' ],
'dst_meta':meta_3338,
'operation':downscale_operation,
'variable':variable }
for src,fn,cru in zip( np.vsplit( dat, time_len ), output_filenames, cru_gen ) ]
del dat, cru_gen, cru_stack
out = pool.map( run, args_list )
pool.close()
| mit |
tedmeeds/tcga_encoder | tcga_encoder/analyses/old/weight_clustering2.py | 1 | 15666 | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
#from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.data.hallmark_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from sklearn.metrics import roc_auc_score # called in auc_test below but was not explicitly imported
from scipy import stats
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
import networkx as nx
import json
from networkx.readwrite import json_graph
size_per_unit=0.25
def process_all_sources( save_dir, source2w, prefix ):
sources = source2w.keys()
ws = source2w.values()
#pdb.set_trace()
shapes2use = ["circle","square","triangle-up"]
scores2use = [0,0.5,1.0]
colors2use = ["red","blue","green"]
counts = [len(w) for w in ws]
W = pd.concat(ws,0)
#W=W/np.sqrt( np.sum( np.square( W.values ),0 ))
#pdb.set_trace()
n_features = len(W)
shapes = []
scores = []
colors = []
for i in xrange(n_features):
if i < counts[0]:
shapes.append( shapes2use[0] )
scores.append( scores2use[0] )
colors.append( colors2use[0] )
elif i < counts[1]+counts[0]:
shapes.append( shapes2use[1] )
scores.append( scores2use[1] )
colors.append( colors2use[1] )
else:
shapes.append( shapes2use[2] )
scores.append( scores2use[2] )
colors.append( colors2use[2] )
shapes = np.array(shapes,dtype=str)
colors = np.array(colors,dtype=str)
scores = np.array(scores,dtype=float)
sizes = 10*np.ones(n_features)
w_corr = W.T.corr()
corr_v = w_corr.values
names = w_corr.columns
min_corr = 0.8
keep_ids = []
for i in xrange(n_features):
c = corr_v[i]
if sum( np.abs(c) > min_corr ) > 1:
keep_ids.append(i )
print "keeping %d of %d nodes"%(len(keep_ids),n_features)
keep_ids = np.array(keep_ids)
keep_names = names[keep_ids]
keep_shapes = shapes[keep_ids]
keep_sizes = sizes[keep_ids]
keep_scores = scores[keep_ids]
keep_colors = colors[keep_ids]
w_corr = w_corr.loc[ keep_names ][keep_names]
corr_v = w_corr.values
n_features = len(w_corr)
#pdb.set_trace()
#
tau = min_corr
G=nx.Graph()
i=0
nodes = []
links = []
nodes_ids=[]
node_ids = OrderedDict()
#flare = OrderedDict()
for i,c,name_i in zip( xrange( n_features ), corr_v, keep_names ):
for j,name_j in zip( xrange(n_features), keep_names ):
if j > i:
if np.abs( c[j] ) > tau:
if node_ids.has_key(name_i) is False:
nodes.append( {"id":name_i})
if node_ids.has_key(name_j) is False:
nodes.append( {"id":name_j})
links.append( {"source":i,"target":j,"w":c[j]} )
nodes_ids.append(i)
nodes_ids.append(j)
nodes_ids = np.unique( np.array(nodes_ids))
json_node = []
for i,name,size,score,shape,color in zip( xrange( n_features ), keep_names, keep_sizes, keep_scores, keep_shapes, keep_colors ):
# name = names[i]
# size = int(80*total_weights[i])
# score = 1
# type = "circle"
json_node.append( {"size":size,"score":score,"id":name,"type":shape})
G.add_node(name, color=color, size=size )
json.dump({"nodes":json_node,"links":links,"directed": False,
"multigraph": False,"graph": []}, open(save_dir+'/all_force%s3.json'%(prefix),'w'))
for link in links:
G.add_edge( keep_names[link["source"]], keep_names[link["target"]], weight = np.abs(link["w"]) )
from networkx.drawing.nx_agraph import graphviz_layout
layout=graphviz_layout
print "laying out graph"
pos=layout(G)
pp.figure(figsize=(45,45))
print "drawing graph"
nx.draw(G,pos,
with_labels=True, hold=False, alpha=0.25, font_size=12
)
# d = json_graph.node_link_data(G)
G.clear()
pp.savefig(save_dir + "/mwst%s.png"%(prefix), fmt='png',dpi=300)
def process_source( save_dir, source, w, percent_weights, prefix="" ):
#corr = w.T.corr()
sorted_flattened = np.sort( np.abs(w.values.flatten()) )
n = len(sorted_flattened)
threshold = sorted_flattened[ - int( float(n)*percent_weights) ]
#w = w[ np.abs(w) >= threshold ].fillna(0)
#w = np.sign(w)
#pdb.set_trace()
total_weights = np.abs(w.values).sum(1)
corr = w.T.corr()
corr.sort_index(inplace=True)
corr = corr[ corr.index.values ]
corr_v = corr.values
names = corr.columns
n_source = len(names)
size1 = max( min( 40, int( w.values.shape[0]*size_per_unit ) ), 12 )
size2 = max( min( 40, int( w.values.shape[0]*size_per_unit )), 12 )
# cmap = sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
# htmap3 = sns.clustermap ( corr, cmap=cmap, square=True, figsize=(size1,size1) )
# pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
# pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
# pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
# pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
# htmap3.ax_row_dendrogram.set_visible(False)
# htmap3.ax_col_dendrogram.set_visible(False)
# pp.savefig( save_dir + "/weights_%s_clustermap%s.png"%(source,prefix), fmt="png", bbox_inches = "tight")
#
#labels = [s.get_text() for s in htmap3.ax_heatmap.yaxis.get_majorticklabels()]
#corr = corr[labels]
#corr = corr.loc[labels]
corr_v = corr.values
names = corr.columns
# csr = csr_matrix(np.triu(1.0-np.abs(meth_corr.values)))
# Tcsr = minimum_spanning_tree(csr)
# as_mat = Tcsr.toarray()
#pdb.set_trace()
pp.figure(figsize=(45,45))
tau = 0.5
G=nx.Graph()
i=0
nodes = []
links = []
nodes_ids=[]
node_ids = OrderedDict()
#flare = OrderedDict()
for i in xrange( n_source ):
x = corr_v[i]
name_i = names[i]
#flare[name_i] = []
for j in xrange(n_source):
if j > i:
if np.abs( x[j] ) > tau:
name_j = names[j]
G.add_edge(name_i, name_j, weight = np.abs(x[j]) )
if node_ids.has_key(name_i) is False:
nodes.append( {"id":name_i})
#node_ids[name_i] = 1
#flare[name_i] = []
if node_ids.has_key(name_j) is False:
nodes.append( {"id":name_j})
#node_ids[name_i] = 1
links.append( {"source":i,"target":j} ) #, "value":np.abs(x[j])} )
#flare[name_i].append( name_j )
nodes_ids.append(i)
nodes_ids.append(j)
nodes_ids = np.unique( np.array(nodes_ids))
json_node = []
for i in xrange( n_source ):
name = names[i]
size = int(80*total_weights[i])
score = 1
type = "circle"
json_node.append( {"size":size,"score":score,"id":name,"type":type})
from networkx.drawing.nx_agraph import graphviz_layout
layout=graphviz_layout
#layout=nx.spectral_layout
pos=layout(G)
nx.draw(G,pos,
with_labels=True,
node_size=20, hold=False, node_color='b', alpha=0.25, font_size=12
)
d = json_graph.node_link_data(G)
#pdb.set_trace()
json.dump({"nodes":json_node,"links":links,"directed": False,
"multigraph": False,"graph": []}, open(save_dir+'/%s_force%s2.json'%(source,prefix),'w'))
# names = flare.keys()
# targets = flare.values()
# for target_list in targets:
#
# flares=[]
# targets = []
# for name_i,list_j in flare.iteritems():
# o=OrderedDict()
# o["name"] = name_i
# o["size"] = 100*len(list_j)
# o["imports"] = list_j
# flares.append( o )
#
# #targets.extend( )
#
#
# json.dump(flares, open(save_dir+'/%s_flare%s.json'%(source,prefix),'w'))
#from networkx.readwrite import json_graph
G.clear()
#pp.title("%s"%(tissue_name))
pp.savefig(save_dir + "/%s_mwst%s.png"%(source,prefix), fmt='png',dpi=300)
print " only doing one source now"
def join_weights( W_hidden2z, W_hidden ):
W = {}
n_z = W_hidden2z.shape[1]
columns = np.array( ["z_%d"%i for i in range(n_z)])
for input_source, source_w in W_hidden.iteritems():
#pdb.set_trace()
W[ input_source ] = pd.DataFrame( np.dot( source_w, W_hidden2z ), index = source_w.index, columns = columns )
return W
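# shape note (follows directly from the code above): each source_w is
# (n_features_in_source, n_hidden) and W_hidden2z is (n_hidden, n_z), so every joined
# matrix is (n_features_in_source, n_z), i.e. an input-feature-to-latent-z weight map.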
def get_hidden2z_weights( model_store ):
layer = "rec_z_space"
model_store.open()
w = model_store[ "%s"%(layer) + "/W/w%d"%(0)].values
model_store.close()
return w
def get_hidden_weights( model_store, input_sources, data_store ):
rna_genes = data_store["/RNA/FAIR"].columns
meth_genes = data_store["/METH/FAIR"].columns
mirna_hsas = data_store["/miRNA/FAIR"].columns
post_fix = "_scaled"
idx=1
n_sources = len(input_sources)
W = {}
for w_idx, input_source in zip( range(n_sources), input_sources ):
w = model_store[ "rec_hidden" + "/W/w%d"%(w_idx)].values
#pdb.set_trace()
d,k = w.shape
columns = np.array( ["h_%d"%i for i in range(k)])
if input_source == "RNA":
rows = rna_genes
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "miRNA":
rows = mirna_hsas
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "METH":
rows = meth_genes
#rows = np.array( [ "M-%s"%g for g in meth_genes], dtype=str )
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "TISSUE":
rows = tissue_names
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
model_store.close()
return W
def auc_standard_error( theta, nA, nN ):
# from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# theta: estimated AUC, can be 0.5 for a random test
# nA size of population A
# nN size of population N
Q1=theta/(2.0-theta); Q2=2*theta*theta/(1+theta)
SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
return SE
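# hedged usage sketch (illustrative numbers only): the standard error of an estimated
# AUC of 0.75 computed from 40 positive and 60 negative samples would be
#   se = auc_standard_error( 0.75, nA=40, nN=60 )
# using the Hanley & McNeil (1982) expression implemented above.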
def auc_test( true_y, est_y ):
n = len(true_y)
n_1 = true_y.sum()
n_0 = n - n_1
if n_1 == 0 or n_1 == n:
return 0.5, 0.0, 0.0, 1.0
auc = roc_auc_score( true_y, est_y )
difference = auc - 0.5
if difference < 0:
# switch labels
se = auc_standard_error( auc, n_0, n_1 )
se_null = auc_standard_error( 0.5, n_0, n_1 )
else:
se = auc_standard_error( 1-auc, n_1, n_0 )
se_null = auc_standard_error( 0.5, n_1, n_0 )
se_combined = np.sqrt( se**2 + se_null**2 )
z_value = np.abs(difference) / se_combined
p_value = 1.0 - stats.norm.cdf( np.abs(z_value) )
return auc, se, z_value, p_value
def find_keepers_over_groups( z, groups, name, nbr2keep, stats2use ):
inners = []; p_inners=[]
mx_inner = 0.0
norm_z = np.linalg.norm(z)
for X, stat in zip( groups, stats2use ):
pearsons = np.zeros( X.shape[1] )
pvalues = np.zeros( X.shape[1] )
for x,x_idx in zip( X.values.T, range(X.shape[1])):
if stat == "pearson":
pearsons[x_idx], pvalues[x_idx] = stats.pearsonr( z, x )
elif stat == "auc":
true_y = (x>0).astype(int)
auc, se, zvalue, pvalue = auc_test( true_y, z ) #np.sqrt( ses_tissue**2 + se_r_tissue**2 )
pearsons[x_idx] = auc-0.5
pvalues[x_idx] = pvalue
#pdb.set_trace()
#norms = norm_z*np.linalg.norm( X, axis=0 )
#inner = pd.Series( np.dot( z, X )/norms, index = X.columns, name=name )
inner = pd.Series( pearsons, index = X.columns, name=name )
p_inner = pd.Series( pvalues, index = X.columns, name=name )
inners.append(inner)
p_inners.append(p_inner)
this_mx = np.max(np.abs(inner))
if this_mx > mx_inner:
mx_inner = this_mx
all_keepers = []
#all_pvalues = []
for inner,p_inner in zip(inners,p_inners):
#inner.sort_values(inplace=True)
#inner = inner / mx_inner
#abs_inner = np.abs( inner )
#ordered = np.argsort( -inner.values )
ordered = np.argsort( p_inner.values )
ordered = pd.DataFrame( np.vstack( (inner.values[ordered],p_inner.values[ordered] ) ).T, index =inner.index[ordered],columns=["r","p"] )
#pdb.set_trace()
#keepers = pd.concat( [ordered[:nbr2keep], ordered[-nbr2keep:]], axis=0 )
keepers = ordered[:nbr2keep]
#pdb.set_trace()
#keepers = keepers.sort_values()
all_keepers.append(keepers)
return all_keepers
def find_keepers(z, X, name, nbr2keep):
inner = pd.Series( np.dot( z, X ), index = X.columns, name=name )
inner.sort_values(inplace=True)
inner = inner / np.max(np.abs(inner))
#signed = np.sign( inner )
abs_inner = np.abs( inner )
ordered = np.argsort( -abs_inner.values )
ordered = pd.Series( inner.values[ordered], index =inner.index[ordered],name=name )
keepers = ordered[:nbr2keep]
keepers = keepers.sort_values()
return keepers
def main( data_location, results_location ):
pathway_info = Pathways()
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
model_filename = os.path.join( results_path, "full_vae_model.h5" )
save_dir = os.path.join( results_path, "weight_clustering" )
check_and_mkdir(save_dir)
z_dir = os.path.join( save_dir, "z_pics" )
check_and_mkdir(z_dir)
h_dir = os.path.join( save_dir, "h_pics" )
check_and_mkdir(h_dir)
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
model_store = pd.HDFStore( model_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
#input_sources = ["METH","RNA","miRNA"]
input_sources = ["RNA","miRNA","METH"]
W_hidden = get_hidden_weights( model_store, input_sources, data_store )
W_hidden2z = get_hidden2z_weights( model_store )
size_per_unit = 0.25
weighted_z = join_weights( W_hidden2z, W_hidden )
barcodes = data_store["/CLINICAL/observed"][ data_store["/CLINICAL/observed"][["RNA","miRNA","METH","DNA"]].sum(1)==4 ].index.values
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
tissue_names = tissues.columns
tissue_idx = np.argmax( tissues.values, 1 )
#n = len(Z)
n_tissues = len(tissue_names)
n_h = W_hidden2z.shape[0]
print "+++++++++++++++++++++++++++"
print " find weights that are significant together, not"
#W_hidden["RNA_miRNA"] = pd.concat( [W_hidden["RNA"],W_hidden["miRNA"] ],0 )
percent_weights = 0.05
process_all_sources( save_dir, weighted_z, prefix="_all_Z" )
process_all_sources( save_dir, W_hidden, prefix="_all" )
# for source, w in weighted_z.iteritems():
#
# process_source( save_dir, source, w, percent_weights, prefix="_Z" )
#
# for source, w in W_hidden.iteritems():
#
# process_source( save_dir, source, w, percent_weights )
# #break
pp.close('all')
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
main( data_location, results_location ) | mit |
xwolf12/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
anntzer/scikit-learn | sklearn/preprocessing/tests/test_polynomial.py | 2 | 8555 | import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import KBinsDiscretizer, SplineTransformer
# TODO: add PolynomialFeatures if it moves to _polynomial.py
@pytest.mark.parametrize("est", (SplineTransformer,))
def test_polynomial_and_spline_array_order(est):
"""Test that output array has the given order."""
X = np.arange(10).reshape(5, 2)
def is_c_contiguous(a):
return np.isfortran(a.T)
assert is_c_contiguous(est().fit_transform(X))
assert is_c_contiguous(est(order="C").fit_transform(X))
assert np.isfortran(est(order="F").fit_transform(X))
@pytest.mark.parametrize(
"params, err_msg",
[
({"degree": -1}, "degree must be a non-negative integer."),
({"degree": 2.5}, "degree must be a non-negative integer."),
({"degree": "string"}, "degree must be a non-negative integer."),
({"n_knots": 1}, "n_knots must be a positive integer >= 2."),
({"n_knots": 1}, "n_knots must be a positive integer >= 2."),
({"n_knots": 2.5}, "n_knots must be a positive integer >= 2."),
({"n_knots": "string"}, "n_knots must be a positive integer >= 2."),
({"knots": "string"}, "Expected 2D array, got scalar array instead:"),
({"knots": [1, 2]}, "Expected 2D array, got 1D array instead:"),
(
{"knots": [[1]]},
r"Number of knots, knots.shape\[0\], must be >= 2.",
),
(
{"knots": [[1, 5], [2, 6]]},
r"knots.shape\[1\] == n_features is violated.",
),
(
{"knots": [[1], [1], [2]]},
"knots must be sorted without duplicates.",
),
({"knots": [[2], [1]]}, "knots must be sorted without duplicates."),
(
{"extrapolation": None},
"extrapolation must be one of 'error', 'constant', 'linear' or "
"'continue'.",
),
(
{"extrapolation": 1},
"extrapolation must be one of 'error', 'constant', 'linear' or "
"'continue'.",
),
(
{"extrapolation": "string"},
"extrapolation must be one of 'error', 'constant', 'linear' or "
"'continue'.",
),
({"include_bias": None}, "include_bias must be bool."),
({"include_bias": 1}, "include_bias must be bool."),
({"include_bias": "string"}, "include_bias must be bool."),
],
)
def test_spline_transformer_input_validation(params, err_msg):
"""Test that we raise errors for invalid input in SplineTransformer."""
X = [[1], [2]]
with pytest.raises(ValueError, match=err_msg):
SplineTransformer(**params).fit(X)
def test_spline_transformer_manual_knot_input():
"""Test that array-like knot positions in SplineTransformer are accepted.
"""
X = np.arange(20).reshape(10, 2)
knots = [[0.5, 1], [1.5, 2], [5, 10]]
st1 = SplineTransformer(degree=3, knots=knots).fit(X)
knots = np.asarray(knots)
st2 = SplineTransformer(degree=3, knots=knots).fit(X)
for i in range(X.shape[1]):
assert_allclose(st1.bsplines_[i].t, st2.bsplines_[i].t)
def test_spline_transformer_feature_names():
"""Test that SplineTransformer generates correct features name."""
X = np.arange(20).reshape(10, 2)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)
feature_names = splt.get_feature_names()
assert_array_equal(
feature_names,
[
"x0_sp_0",
"x0_sp_1",
"x0_sp_2",
"x0_sp_3",
"x0_sp_4",
"x1_sp_0",
"x1_sp_1",
"x1_sp_2",
"x1_sp_3",
"x1_sp_4",
],
)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)
feature_names = splt.get_feature_names(["a", "b"])
assert_array_equal(
feature_names,
[
"a_sp_0",
"a_sp_1",
"a_sp_2",
"a_sp_3",
"b_sp_0",
"b_sp_1",
"b_sp_2",
"b_sp_3",
],
)
@pytest.mark.parametrize("degree", range(1, 5))
@pytest.mark.parametrize("n_knots", range(3, 5))
@pytest.mark.parametrize("knots", ["uniform", "quantile"])
def test_spline_transformer_unity_decomposition(degree, n_knots, knots):
"""Test that B-splines are indeed a decomposition of unity.
Splines basis functions must sum up to 1 per row, if we stay in between
boundaries.
"""
X = np.linspace(0, 1, 100)[:, None]
# make the boundaries 0 and 1 part of X_train, for sure.
X_train = np.r_[[[0]], X[::2, :], [[1]]]
X_test = X[1::2, :]
splt = SplineTransformer(
n_knots=n_knots, degree=degree, knots=knots, include_bias=True
)
splt.fit(X_train)
for X in [X_train, X_test]:
assert_allclose(np.sum(splt.transform(X), axis=1), 1)
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
def test_spline_transformer_linear_regression(bias, intercept):
"""Test that B-splines fit a sinusodial curve pretty well."""
X = np.linspace(0, 10, 100)[:, None]
y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose
pipe = Pipeline(
steps=[
(
"spline",
SplineTransformer(
n_knots=15,
degree=3,
include_bias=bias,
extrapolation="constant",
),
),
("ols", LinearRegression(fit_intercept=intercept)),
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict(X), y, rtol=1e-3)
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5])
def test_spline_transformer_extrapolation(bias, intercept, degree):
"""Test that B-spline extrapolation works correctly."""
# we use a straight line for that
X = np.linspace(-1, 1, 100)[:, None]
y = X.squeeze()
# 'constant'
pipe = Pipeline(
[
[
"spline",
SplineTransformer(
n_knots=4,
degree=degree,
include_bias=bias,
extrapolation="constant",
),
],
["ols", LinearRegression(fit_intercept=intercept)],
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])
# 'linear'
pipe = Pipeline(
[
[
"spline",
SplineTransformer(
n_knots=4,
degree=degree,
include_bias=bias,
extrapolation="linear",
),
],
["ols", LinearRegression(fit_intercept=intercept)],
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])
# 'error'
splt = SplineTransformer(
n_knots=4, degree=degree, include_bias=bias, extrapolation="error"
)
splt.fit(X)
with pytest.raises(ValueError):
splt.transform([[-10]])
with pytest.raises(ValueError):
splt.transform([[5]])
def test_spline_transformer_kbindiscretizer():
"""Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer."""
rng = np.random.RandomState(97531)
X = rng.randn(200).reshape(200, 1)
n_bins = 5
n_knots = n_bins + 1
splt = SplineTransformer(
n_knots=n_knots, degree=0, knots="quantile", include_bias=True
)
splines = splt.fit_transform(X)
kbd = KBinsDiscretizer(
n_bins=n_bins, encode="onehot-dense", strategy="quantile"
)
kbins = kbd.fit_transform(X)
# Though they should be exactly equal, we test approximately with high
# accuracy.
assert_allclose(splines, kbins, rtol=1e-13)
@pytest.mark.parametrize("n_knots", [5, 10])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("degree", [3, 5])
def test_spline_transformer_n_features_out(n_knots, include_bias, degree):
"""Test that transform results in n_features_out_ features."""
splt = SplineTransformer(
n_knots=n_knots,
degree=degree,
include_bias=include_bias
)
X = np.linspace(0, 1, 10)[:, None]
splt.fit(X)
assert splt.transform(X).shape[1] == splt.n_features_out_
| bsd-3-clause |
PrincetonML/SIF | src/train.py | 1 | 10033 |
import sys, os
from time import time
import random
import numpy as np
from params import params
import argparse
from theano import config
import lasagne
from sklearn.decomposition import TruncatedSVD
import data_io
from proj_model_sim import proj_model_sim
from proj_model_sentiment import proj_model_sentiment
import eval
##################################################
def str2bool(v):
"utility function for parsing boolean arguments"
if v is None:
return False
if v.lower() in ("yes", "true", "t", "1"):
return True
if v.lower() in ("no", "false", "f", "0"):
return False
raise ValueError('A type that was supposed to be boolean is not boolean.')
def learner2bool(v):
"utility function for parsing the argument for learning optimization algorithm"
if v is None:
return lasagne.updates.adam
if v.lower() == "adagrad":
return lasagne.updates.adagrad
if v.lower() == "adam":
return lasagne.updates.adam
raise ValueError('A type that was supposed to be a learner is not.')
def get_pc(data, We, weight4ind, params):
"Comput the principal component"
def get_weighted_average(We, x, w):
"Compute the weighted average vectors"
n_samples = x.shape[0]
emb = np.zeros((n_samples, We.shape[1]))
for i in xrange(n_samples):
emb[i,:] = w[i,:].dot(We[x[i,:],:]) / np.count_nonzero(w[i,:])
return emb
for i in data:
i[0].populate_embeddings(words)
if not params.task == "sentiment":
i[1].populate_embeddings(words)
if params.task == "ent":
(scores,g1x,g1mask,g2x,g2mask) = data_io.getDataEntailment(data)
if params.weightfile:
g1mask = data_io.seq2weight(g1x, g1mask, weight4ind)
elif params.task == "sim":
(scores,g1x,g1mask,g2x,g2mask) = data_io.getDataSim(data, -1)
if params.weightfile:
g1mask = data_io.seq2weight(g1x, g1mask, weight4ind)
elif params.task == "sentiment":
(scores,g1x,g1mask) = data_io.getDataSentiment(data)
if params.weightfile:
g1mask = data_io.seq2weight(g1x, g1mask, weight4ind)
emb = get_weighted_average(We, g1x, g1mask)
svd = TruncatedSVD(n_components=params.npc, n_iter=7, random_state=0)
svd.fit(emb)
return svd.components_
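# [hedged note]: this function only computes the common components; a typical SIF-style
# correction (performed by the caller, not in this file) subtracts each embedding's
# projection onto them, e.g.
#   emb_corrected = emb - emb.dot( pc.transpose() ).dot( pc )
# where pc is the (npc, dim) array returned by get_pc above.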
def train_util(model, train_data, dev, test, train, words, params):
"utility function for training the model"
start_time = time()
try:
for eidx in xrange(params.epochs):
kf = data_io.get_minibatches_idx(len(train_data), params.batchsize, shuffle=True)
uidx = 0
for _, train_index in kf:
uidx += 1
batch = [train_data[t] for t in train_index]
# load the word ids
for i in batch:
i[0].populate_embeddings(words)
if not params.task == "sentiment":
i[1].populate_embeddings(words)
# load the data
if params.task == "ent":
(scores,g1x,g1mask,g2x,g2mask) = data_io.getDataEntailment(batch)
elif params.task == "sim":
(scores,g1x,g1mask,g2x,g2mask) = data_io.getDataSim(batch, model.nout)
elif params.task == "sentiment":
(scores,g1x,g1mask) = data_io.getDataSentiment(batch)
else:
raise ValueError('Task should be ent or sim.')
# train
if not params.task == "sentiment":
if params.weightfile:
g1mask = data_io.seq2weight(g1x, g1mask, params.weight4ind)
g2mask = data_io.seq2weight(g2x, g2mask, params.weight4ind)
cost = model.train_function(scores, g1x, g2x, g1mask, g2mask)
else:
if params.weightfile:
g1mask = data_io.seq2weight(g1x, g1mask, params.weight4ind)
cost = model.train_function(scores, g1x, g1mask)
if np.isnan(cost) or np.isinf(cost):
print 'NaN detected'
# undo batch to save RAM
for i in batch:
i[0].representation = None
i[0].unpopulate_embeddings()
if not params.task == "sentiment":
i[1].representation = None
i[1].unpopulate_embeddings()
# evaluate
if params.task == "sim":
dp,ds = eval.supervised_evaluate(model,words,dev,params)
tp,ts = eval.supervised_evaluate(model,words,test,params)
rp,rs = eval.supervised_evaluate(model,words,train,params)
print "evaluation: ",dp,ds,tp,ts,rp,rs
elif params.task == "ent" or params.task == "sentiment":
ds = eval.supervised_evaluate(model,words,dev,params)
ts = eval.supervised_evaluate(model,words,test,params)
rs = eval.supervised_evaluate(model,words,train,params)
print "evaluation: ",ds,ts,rs
else:
raise ValueError('Task should be ent or sim.')
print 'Epoch ', (eidx+1), 'Cost ', cost
sys.stdout.flush()
except KeyboardInterrupt:
print "Training interupted"
end_time = time()
print "total time:", (end_time - start_time)
##################################################
# initialize
random.seed(1)
np.random.seed(1)
# parse arguments
print sys.argv
parser = argparse.ArgumentParser()
parser.add_argument("-LW", help="Lambda for word embeddings (normal training).", type=float)
parser.add_argument("-LC", help="Lambda for composition parameters (normal training).", type=float)
parser.add_argument("-batchsize", help="Size of batch.", type=int)
parser.add_argument("-dim", help="Size of input.", type=int)
parser.add_argument("-memsize", help="Size of classification layer.",
type=int)
parser.add_argument("-wordfile", help="Word embedding file.")
parser.add_argument("-layersize", help="Size of output layers in models.", type=int)
parser.add_argument("-updatewords", help="Whether to update the word embeddings")
parser.add_argument("-traindata", help="Training data file.")
parser.add_argument("-devdata", help="Training data file.")
parser.add_argument("-testdata", help="Testing data file.")
parser.add_argument("-nonlinearity", help="Type of nonlinearity in projection and DAN model.",
type=int)
parser.add_argument("-nntype", help="Type of neural network.")
parser.add_argument("-epochs", help="Number of epochs in training.", type=int)
parser.add_argument("-minval", help="Min rating possible in scoring.", type=int)
parser.add_argument("-maxval", help="Max rating possible in scoring.", type=int)
parser.add_argument("-clip", help="Threshold for gradient clipping.",type=int)
parser.add_argument("-eta", help="Learning rate.", type=float)
parser.add_argument("-learner", help="Either AdaGrad or Adam.")
parser.add_argument("-task", help="Either sim, ent, or sentiment.")
parser.add_argument("-weightfile", help="The file containing the weights for words; used in weighted_proj_model_sim.")
parser.add_argument("-weightpara", help="The parameter a used in computing word weights.", type=float)
parser.add_argument("-npc", help="The number of principal components to use.", type=int, default=0)
args = parser.parse_args()
params = params()
params.LW = args.LW
params.LC = args.LC
params.batchsize = args.batchsize
params.hiddensize = args.dim
params.memsize = args.memsize
params.wordfile = args.wordfile
params.nntype = args.nntype
params.layersize = args.layersize
params.updatewords = str2bool(args.updatewords)
params.traindata = args.traindata
params.devdata = args.devdata
params.testdata = args.testdata
params.nntype = args.nntype
params.epochs = args.epochs
params.learner = learner2bool(args.learner)
params.task = args.task
params.weightfile = args.weightfile
params.weightpara = args.weightpara
params.npc = args.npc
if args.eta:
params.eta = args.eta
params.clip = args.clip
if args.clip:
if params.clip == 0:
params.clip = None
params.minval = args.minval
params.maxval = args.maxval
if args.nonlinearity:
if args.nonlinearity == 1:
params.nonlinearity = lasagne.nonlinearities.linear
if args.nonlinearity == 2:
params.nonlinearity = lasagne.nonlinearities.tanh
if args.nonlinearity == 3:
params.nonlinearity = lasagne.nonlinearities.rectify
if args.nonlinearity == 4:
params.nonlinearity = lasagne.nonlinearities.sigmoid
# load data
(words, We) = data_io.getWordmap(params.wordfile)
if args.task == "sim" or args.task == "ent":
train_data = data_io.getSimEntDataset(params.traindata,words,params.task)
elif args.task == "sentiment":
train_data = data_io.getSentimentDataset(params.traindata,words)
else:
raise ValueError('Task should be ent, sim, or sentiment.')
# load weight
if params.weightfile:
word2weight = data_io.getWordWeight(params.weightfile, params.weightpara)
params.weight4ind = data_io.getWeight(words, word2weight)
print 'word weights computed using parameter a=' + str(params.weightpara)
else:
params.weight4ind = []
if params.npc > 0:
params.pc = get_pc(train_data, We, params.weight4ind, params)
else:
params.pc = []
# load model
model = None
if params.nntype == 'proj':
model = proj_model_sim(We, params)
elif params.nntype == 'proj_sentiment':
model = proj_model_sentiment(We, params)
else:
"Error no type specified"
# train
train_util(model, train_data, params.devdata, params.testdata, params.traindata, words, params)
| mit |
sonnyhu/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |