repo_name (stringlengths 7-60) | path (stringlengths 6-134) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 1.04k-149k) | license (stringclasses, 12 values)
---|---|---|---|---|---|
q1ang/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
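# Illustrative sketch (not part of the original test module): the yield-based
# loop above is nose-style parametrization, and each yielded tuple expands to a
# direct call such as check_neighbors(True, False, 3, 'minkowski', dict(p=3)).
# The same comparison can also be written as a standalone check, assuming only
# the imports already made at the top of this file:
def _example_query_matches_brute_force():
    rng = np.random.RandomState(0)
    X = rng.random_sample((40, DIMENSION))
    Y = rng.random_sample((10, DIMENSION))
    kdt = KDTree(X, leaf_size=1, metric='euclidean')
    dist_tree, _ = kdt.query(Y, k=3)
    dist_brute, _ = brute_force_neighbors(X, Y, k=3, metric='euclidean')
    # distances must agree even when tied neighbour indices differ
    assert_array_almost_equal(dist_tree, dist_brute)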
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
rgommers/numpy | numpy/core/numeric.py | 7 | 76727 | import functools
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
asfortranarray, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
from . import overrides
from . import umath
from . import shape_base
from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.zeros_like(y)
array([0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
# needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty : Return a new uninitialized array.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
if like is not None:
return _ones_with_like(shape, dtype=dtype, order=order, like=like)
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
_ones_with_like = array_function_dispatch(
_ones_dispatcher
)(ones)
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.ones_like(y)
array([1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
_full_with_like = array_function_dispatch(
_full_dispatcher
)(full)
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
keepdims : bool, optional
If this is set to True, the axes that are counted are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.19.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> a = np.array([[0, 1, 7, 0],
... [3, 0, 2, 19]])
>>> np.count_nonzero(a)
5
>>> np.count_nonzero(a, axis=0)
array([1, 1, 2, 1])
>>> np.count_nonzero(a, axis=1)
array([2, 3])
>>> np.count_nonzero(a, axis=1, keepdims=True)
array([[2],
[3]])
"""
if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
# TODO: this works around .astype(bool) not working properly (gh-9847)
if np.issubdtype(a.dtype, np.character):
a_bool = a != a.dtype.type()
else:
a_bool = a.astype(np.bool_, copy=False)
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
@set_module('numpy')
def isfortran(a):
"""
Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Returns
-------
isfortran : bool
Returns True if the array is Fortran contiguous but *not* C contiguous.
Examples
--------
np.array allows you to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
def _argwhere_dispatcher(a):
return (a,)
@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
non-zero items.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
# nonzero does not behave well on 0d, so promote to 1d
if np.ndim(a) == 0:
a = shape_base.atleast_1d(a)
# then remove the added dimension
return argwhere(a)[:,:0]
return transpose(nonzero(a))
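# Descriptive note (not part of the original source): for a 0-d input the
# branch above promotes to 1-d and then strips the added axis, so, e.g.,
# np.argwhere(np.array(1)) has shape (1, 0) and np.argwhere(np.array(0)) has
# shape (0, 0), consistent with the documented ``(N, a.ndim)`` output shape
# when ``a.ndim`` is 0.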
def _flatnonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return np.nonzero(np.ravel(a))[0]
def _correlate_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
`numpy.correlate` may perform slowly in large arrays (e.g. n = 1e5) because it does
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
be preferable.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
return multiarray.correlate2(a, v, mode)
def _convolve_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
return multiarray.correlate(a, v[::-1], mode)
def _outer_dispatcher(a, b, out=None):
return (a, b, out)
@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0  a0*b1 ... a0*bN ]
 [a1*b0    .
 [ ...          .
 [aM*b0            aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to dimensions other than 1D and other
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
is the equivalent.
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
def _tensordot_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing
two array_like objects, ``(a_axes, b_axes)``, sum the products of
`a`'s and `b`'s elements (components) over the axes specified by
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
Returns
-------
output : ndarray
The tensor dot product of the input.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
The shape of the result consists of the non-contracted axes of the
first tensor, followed by the non-contracted axes of the second.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]])
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([['a', 'b'],
['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
array([[['acc', 'bdd'],
['aaacccc', 'bbbdddd']],
[['aaaaacccccc', 'bbbbbdddddd'],
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[['a', 'b'],
['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
array([[['abbbbb', 'cddddd'],
['aabbbbbb', 'ccdddddd']],
[['aaabbbbbbb', 'cccddddddd'],
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[['abb', 'cdd'],
['aaabbbb', 'cccdddd']],
[['aaaaabbbbbb', 'cccccdddddd'],
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
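# Minimal sketch (illustrative, not part of the original source): with
# ``axes=1`` the transpose/reshape/dot path above reduces to an ordinary
# matrix product for 2-d inputs.
def _example_tensordot_axes1():
    a = np.arange(6.).reshape(2, 3)
    b = np.arange(12.).reshape(3, 4)
    # sum over the last axis of ``a`` and the first axis of ``b``
    assert np.allclose(np.tensordot(a, b, axes=1), a @ b)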
def _roll_dispatcher(a, shift, axis=None):
return (a,)
@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, -1)
array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
>>> np.roll(x2, -1, axis=1)
array([[1, 2, 3, 4, 0],
[6, 7, 8, 9, 5]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
def _rollaxis_dispatcher(a, axis, start=None):
return (a,)
@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
This function continues to be supported for backward compatibility, but you
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
1.11.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
When ``start <= axis``, the axis is rolled back until it lies in
this position. When ``start > axis``, the axis is rolled until it
lies before this position. The default, 0, results in a "complete"
roll. The following table describes how negative values of ``start``
are interpreted:
.. table::
:align: left
+-------------------+----------------------+
| ``start`` | Normalized ``start`` |
+===================+======================+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
+-------------------+----------------------+
| ``-arr.ndim`` | 0 |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``-1`` | ``arr.ndim-1`` |
+-------------------+----------------------+
| ``0`` | ``0`` |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``arr.ndim`` | ``arr.ndim`` |
+-------------------+----------------------+
| ``arr.ndim + 1`` | raise ``AxisError`` |
+-------------------+----------------------+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
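# Usage sketch (illustrative, not part of the original source), assuming a
# 3-dimensional array:
def _example_normalize_axis_tuple():
    assert normalize_axis_tuple(-1, 3) == (2,)
    assert normalize_axis_tuple((0, -2), 3, argname='axes') == (0, 1)
    try:
        normalize_axis_tuple((0, 0), 3)
    except ValueError:
        pass  # repeated axes are rejected unless allow_duplicate=True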
def _moveaxis_dispatcher(a, source, destination):
return (a,)
@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose : Permute the dimensions of an array.
swapaxes : Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
return (a, b)
@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return moveaxis(cp, -1, axisc)
little_endian = (sys.byteorder == 'little')
@set_module('numpy')
def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
The subarray ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]])
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
if sparse:
res = tuple()
else:
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
if sparse:
res = res + (idx,)
else:
res[i] = idx
return res
def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
if like is not None:
return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
_fromfunction_with_like = array_function_dispatch(
_fromfunction_dispatcher
)(fromfunction)
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@set_module('numpy')
def isscalar(element):
"""
Returns True if the type of `element` is a scalar type.
Parameters
----------
element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `element` is a scalar type, False if it is not.
See Also
--------
ndim : Get the number of dimensions of an array
Notes
-----
If you need a stricter way to identify a *numerical* scalar, use
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
non-numerical elements such as strings.
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
as that will also return true for 0d arrays. This is how numpy overloads
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
+======================================+===============+===================+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
| builtins) | | |
+--------------------------------------+---------------+-------------------+
| builtin string and buffer objects | ``True`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other builtin objects, like | ``False`` | ``True`` |
| `pathlib.Path`, `Exception`, | | |
| the result of `re.compile` | | |
+--------------------------------------+---------------+-------------------+
| third-party objects like | ``False`` | ``True`` |
| `matplotlib.figure.Figure` | | |
+--------------------------------------+---------------+-------------------+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other numpy arrays | ``False`` | ``False`` |
+--------------------------------------+---------------+-------------------+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
| objects | | |
+--------------------------------------+---------------+-------------------+
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
>>> np.isscalar('numpy')
True
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
>>> np.isscalar(Number())
True
"""
return (isinstance(element, generic)
or type(element) in ScalarType
or isinstance(element, numbers.Number))
@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
def _identity_dispatcher(n, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if like is not None:
return _identity_with_like(n, dtype=dtype, like=like)
from numpy import eye
return eye(n, dtype=dtype, like=like)
_identity_with_like = array_function_dispatch(
_identity_dispatcher
)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any, equal
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
The comparison of `a` and `b` uses standard broadcasting, which
means that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for
`equal` but not `array_equal`.
`allclose` is not defined for non-numeric data types.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
math.isclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero value
for `atol` will result in `False` if either `a` or `b` is zero.
`isclose` is not defined for non-numeric data types.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta.
# Although, the default tolerances are unlikely to be useful
if y.dtype.kind != "m":
dt = multiarray.result_type(y, 1.)
y = asanyarray(y, dtype=dt)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
# Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
return cond[()] # Flatten 0d arrays to scalars
def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
.. versionadded:: 1.19.0
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
def _array_equiv_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
multiarray.broadcast(a1, a2)
except Exception:
return False
return bool(asarray(a1 == a2).all())
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
def extend_all(module):
existing = set(__all__)
mall = getattr(module, '__all__')
for a in mall:
if a not in existing:
__all__.append(a)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
| bsd-3-clause |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/plot_legislation/plot_ticpe_taux_implicite.py | 4 | 2264 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:06:45 2015
@author: thomas.douenne
TICPE: Taxe intérieure sur la consommation des produits énergétiques
"""
# The aim of this script is to plot the evolution of the implicit TICPE rate since 1993.
# This rate is studied for diesel and for unleaded fuels.
# Import general-purpose modules
from pandas import concat
# Import OpenFisca-specific modules
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar_list
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_accises import get_accises_carburants
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_tva import get_tva_taux_plein
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_prix_carburants import \
get_prix_carburants
# Retrieve the legislation parameters and the fuel prices
ticpe = ['ticpe_gazole', 'ticpe_super9598']
accise_diesel = get_accises_carburants(ticpe)
prix_ttc = ['diesel_ttc', 'super_95_ttc']
prix_carburants = get_prix_carburants(prix_ttc)
tva_taux_plein = get_tva_taux_plein()
# Build a dataframe containing these parameters
df_taux_implicite = concat([accise_diesel, prix_carburants, tva_taux_plein], axis = 1)
df_taux_implicite.rename(columns = {'value': 'taux plein tva'}, inplace = True)
# From these parameters, compute the implicit tax rates
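# The implicit rate below is the excise duty grossed up by VAT, divided by the pre-tax
# share of the pump price: accise * (1 + tva) / (prix_ttc - accise * (1 + tva)).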
df_taux_implicite['taux_implicite_diesel'] = (
df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva']) /
(df_taux_implicite['prix diesel ttc'] -
(df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva'])))
)
df_taux_implicite['taux_implicite_sp95'] = (
df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva']) /
(df_taux_implicite['prix super 95 ttc'] -
(df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva'])))
)
df_taux_implicite = df_taux_implicite.dropna()
# Draw the plots
graph_builder_bar_list(df_taux_implicite['taux_implicite_diesel'], 1, 1)
graph_builder_bar_list(df_taux_implicite['taux_implicite_sp95'], 1, 1)
| agpl-3.0 |
kylerbrown/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
calispac/digicampipe | digicampipe/scripts/spe.py | 1 | 14553 | #!/usr/bin/env python
"""
Do the Single Photoelectron analysis
Usage:
digicam-spe [options] [--] <INPUT>...
Options:
-h --help Show this screen.
--max_events=N Maximum number of events to analyse.
--max_histo_filename=FILE File path of the max histogram.
[Default: ./max_histo.pk]
--charge_histo_filename=FILE File path of the charge histogram
[Default: ./charge_histo.pk]
--raw_histo_filename=FILE File path of the raw histogram
[Default: ./raw_histo.pk]
-o OUTPUT --output=OUTPUT Output file path to store the results.
[Default: ./results.npz]
-c --compute Compute the data.
-f --fit Fit.
-d --display Display.
-v --debug Enter the debug mode.
-p --pixel=<PIXEL> Give a list of pixel IDs.
--shift=N Number of bins to shift before integrating
[default: 0].
--integral_width=N Number of bins to integrate over
[default: 7].
--pulse_finder_threshold=F Threshold of pulse finder in arbitrary units
[default: 2.0].
--save_figures=PATH Save the plots to the indicated folder.
                                Figures are not saved if set to none
[default: none]
--ncall=N Number of calls for the fit [default: 10000]
--n_samples=N Number of samples per waveform
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from histogram.histogram import Histogram1D
from tqdm import tqdm
from digicampipe.calib.baseline import fill_baseline, subtract_baseline
from digicampipe.calib.charge import compute_charge
from digicampipe.calib.peak import find_pulse_with_max, \
find_pulse_fast
from digicampipe.io.event_stream import calibration_event_stream
from digicampipe.scripts import raw
from digicampipe.scripts.fmpe import FMPEFitter
from digicampipe.utils.docopt import convert_pixel_args, \
convert_int, convert_text
from digicampipe.utils.pdf import fmpe_pdf_10
class MaxHistoFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 2
super(MaxHistoFitter, self).__init__(histogram, estimated_gain,
n_peaks, **kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_0': None, 'a_1': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_0, a_1):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': a_0, 'a_1': a_1, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
class SPEFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 4
super(SPEFitter, self).__init__(histogram, estimated_gain, n_peaks,
**kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_1': None, 'a_2': None, 'a_3': None,
'a_4': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_1, a_2, a_3, a_4):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': 0, 'a_1': a_1, 'a_2': a_2,
'a_3': a_3, 'a_4': a_4, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
def initialize_fit(self):
init_params = super(SPEFitter, self).initialize_fit()
init_params['a_4'] = init_params['a_3']
init_params['a_3'] = init_params['a_2']
init_params['a_2'] = init_params['a_1']
init_params['a_1'] = init_params['a_0']
init_params['baseline'] = init_params['baseline'] - init_params['gain']
del init_params['a_0']
self.initial_parameters = init_params
return init_params
def compute_dark_rate(number_of_zeros, total_number_of_events, time):
p_0 = number_of_zeros / total_number_of_events
rate = - np.log(p_0)
rate /= time
return rate
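# Worked example with made-up counts: 9048 empty windows out of 10000 events and
# window_length = 200 (4 ns x 50 samples, assuming the factor 4 used in entry() is the
# sampling period in ns) give rate = -ln(0.9048) / 200 ~= 5e-4, read downstream as GHz.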
def compute_max_histo(files, histo_filename, pixel_id, max_events,
integral_width, shift, baseline):
n_pixels = len(pixel_id)
if not os.path.exists(histo_filename):
events = calibration_event_stream(files, pixel_id=pixel_id,
max_events=max_events)
# events = compute_baseline_with_min(events)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
events = find_pulse_with_max(events)
events = compute_charge(events, integral_width, shift)
max_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * integral_width,
4095 * integral_width),
)
for event in events:
max_histo.fill(event.data.reconstructed_charge)
max_histo.save(histo_filename)
return max_histo
else:
max_histo = Histogram1D.load(histo_filename)
return max_histo
def compute_spe(files, histo_filename, pixel_id, baseline, max_events,
integral_width, shift, pulse_finder_threshold, debug=False):
if not os.path.exists(histo_filename):
n_pixels = len(pixel_id)
events = calibration_event_stream(files,
max_events=max_events,
pixel_id=pixel_id)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
# events = find_pulse_1(events, 0.5, 20)
# events = find_pulse_2(events, widths=[5, 6], threshold_sigma=2)
events = find_pulse_fast(events, threshold=pulse_finder_threshold)
# events = find_pulse_fast_2(events, threshold=pulse_finder_threshold,
# min_dist=3)
# events = find_pulse_correlate(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_gaussian_filter(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_wavelets(events, widths=[4, 5, 6],
# threshold_sigma=2)
events = compute_charge(events, integral_width=integral_width,
shift=shift)
# events = compute_amplitude(events)
# events = fit_template(events)
# events = compute_full_waveform_charge(events)
spe_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * 50, 4095 * 50)
)
for event in events:
spe_histo.fill(event.data.reconstructed_charge)
spe_histo.save(histo_filename)
return spe_histo
else:
spe_histo = Histogram1D.load(histo_filename)
return spe_histo
def entry():
args = docopt(__doc__)
files = args['<INPUT>']
debug = args['--debug']
max_events = convert_int(args['--max_events'])
raw_histo_filename = args['--raw_histo_filename']
charge_histo_filename = args['--charge_histo_filename']
max_histo_filename = args['--max_histo_filename']
results_filename = args['--output']
pixel_id = convert_pixel_args(args['--pixel'])
n_pixels = len(pixel_id)
integral_width = int(args['--integral_width'])
shift = int(args['--shift'])
pulse_finder_threshold = float(args['--pulse_finder_threshold'])
n_samples = int(args['--n_samples']) # TODO access this in a better way !
estimated_gain = 20
ncall = int(args['--ncall'])
if args['--compute']:
raw_histo = raw.compute(files, max_events=max_events,
pixel_id=pixel_id, filename=raw_histo_filename)
baseline = raw_histo.mode()
compute_max_histo(files, max_histo_filename, pixel_id, max_events,
integral_width, shift, baseline)
compute_spe(files, charge_histo_filename, pixel_id, baseline,
max_events, integral_width, shift, pulse_finder_threshold,
debug=debug)
if args['--fit']:
spe_histo = Histogram1D.load(charge_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
dark_count_rate = np.zeros(n_pixels) * np.nan
electronic_noise = np.zeros(n_pixels) * np.nan
crosstalk = np.zeros(n_pixels) * np.nan
gain = np.zeros(n_pixels) * np.nan
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = max_histo[i]
fitter = MaxHistoFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
n_entries = histo.data.sum()
number_of_zeros = fitter.parameters['a_0']
window_length = 4 * n_samples
rate = compute_dark_rate(number_of_zeros,
n_entries,
window_length)
electronic_noise[i] = fitter.parameters['sigma_e']
dark_count_rate[i] = rate
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute dark count rate'
' in pixel {}'.format(pixel))
print(e)
np.savez(results_filename, dcr=dark_count_rate,
sigma_e=electronic_noise, pixel_id=pixel_id)
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = spe_histo[i]
fitter = SPEFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
params = fitter.parameters
n_entries = params['a_1']
n_entries += params['a_2']
n_entries += params['a_3']
n_entries += params['a_4']
crosstalk[i] = (n_entries - params['a_1']) / n_entries
gain[i] = params['gain']
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute gain and crosstalk'
' in pixel {}'.format(pixel))
print(e)
data = dict(np.load(results_filename))
data['crosstalk'] = crosstalk
data['gain'] = gain
np.savez(results_filename, **data)
save_figure = convert_text(args['--save_figures'])
if save_figure is not None:
output_path = save_figure
spe_histo = Histogram1D.load(charge_histo_filename)
spe_amplitude = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
figure_directory = output_path + 'figures/'
if not os.path.exists(figure_directory):
os.makedirs(figure_directory)
histograms = [spe_histo, spe_amplitude, raw_histo, max_histo]
names = ['histogram_charge/', 'histogram_amplitude/', 'histogram_raw/',
'histo_max/']
for i, histo in enumerate(histograms):
figure = plt.figure()
histogram_figure_directory = figure_directory + names[i]
if not os.path.exists(histogram_figure_directory):
os.makedirs(histogram_figure_directory)
for j, pixel in enumerate(pixel_id):
axis = figure.add_subplot(111)
figure_path = histogram_figure_directory + 'pixel_{}'. \
format(pixel)
try:
histo.draw(index=(j,), axis=axis, log=True, legend=False)
figure.savefig(figure_path)
except Exception as e:
print('Could not save pixel {} to : {} \n'.
format(pixel, figure_path))
print(e)
axis.remove()
if args['--display']:
spe_histo = Histogram1D.load(charge_histo_filename)
        raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
spe_histo.draw(index=(0,), log=True, legend=False)
raw_histo.draw(index=(0,), log=True, legend=False)
max_histo.draw(index=(0,), log=True, legend=False)
try:
data = np.load(results_filename)
dark_count_rate = data['dcr']
electronic_noise = data['sigma_e']
crosstalk = data['crosstalk']
gain = data['gain']
except IOError as e:
print(e)
print('Could not find the analysis files !')
plt.figure()
plt.hist(dark_count_rate[np.isfinite(dark_count_rate)],
bins='auto')
plt.xlabel('dark count rate [GHz]')
plt.legend(loc='best')
plt.figure()
plt.hist(crosstalk[np.isfinite(crosstalk)],
bins='auto')
plt.xlabel('Crosstalk []')
plt.legend(loc='best')
plt.figure()
plt.hist(gain[np.isfinite(gain)],
bins='auto')
plt.xlabel('Gain [LSB/p.e.]')
plt.legend(loc='best')
plt.figure()
plt.hist(electronic_noise[np.isfinite(electronic_noise)],
bins='auto')
plt.xlabel('$\sigma_e$ [LSB]')
plt.legend(loc='best')
plt.show()
return
if __name__ == '__main__':
entry()
| gpl-3.0 |
rbharath/pande-gas | vs_utils/utils/dragon_utils.py | 3 | 5800 | """
Dragon utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from cStringIO import StringIO
import numpy as np
import os
import pandas as pd
import subprocess
import tempfile
from vs_utils.utils import SmilesGenerator
class Dragon(object):
"""
Wrapper for dragon6shell.
Parameters
----------
subset : str, optional (default '2d')
Descriptor subset.
kwargs : dict, optional
Keyword arguments for SmilesGenerator.
"""
def __init__(self, subset='2d', **kwargs):
self.subset = subset
self.initialized = False
self.config_filename, self.smiles_engine = None, None
self.smiles_engine_kwargs = kwargs
def initialize(self):
"""
Initialize.
This is not part of __init__ because it breaks IPython.parallel.
"""
fd, self.config_filename = tempfile.mkstemp()
os.close(fd)
with open(self.config_filename, 'wb') as f:
f.write(self.get_config())
self.smiles_engine = SmilesGenerator(**self.smiles_engine_kwargs)
self.initialized = True
def __del__(self):
"""
Cleanup.
"""
if self.config_filename is not None:
os.unlink(self.config_filename)
def get_config(self):
"""
Get configuration file.
"""
if self.subset == '2d':
return """<?xml version="1.0" encoding="utf-8"?>
<DRAGON version="6.0.36" script_version="1" generation_date="2014/11/17">
<OPTIONS>
<CheckUpdates value="true"/>
<SaveLayout value="true"/>
<ShowWorksheet value="false"/>
<Decimal_Separator value="."/>
<Missing_String value="NaN"/>
<DefaultMolFormat value="1"/>
<HelpBrowser value="/usr/bin/xdg-open"/>
<RejectUnusualValence value="false"/>
<Add2DHydrogens value="false"/>
<MaxSRforAllCircuit value="19"/>
<MaxSR value="35"/>
<MaxSRDetour value="30"/>
<MaxAtomWalkPath value="2000"/>
<LogPathWalk value="true"/>
<LogEdge value="true"/>
<Weights>
<weight name="Mass"/>
<weight name="VdWVolume"/>
<weight name="Electronegativity"/>
<weight name="Polarizability"/>
<weight name="Ionization"/>
<weight name="I-State"/>
</Weights>
<SaveOnlyData value="false"/>
<SaveLabelsOnSeparateFile value="false"/>
<SaveFormatBlock value="%b - %n.txt"/>
<SaveFormatSubBlock value="%b-%s - %n - %m.txt"/>
<SaveExcludeMisVal value="false"/>
<SaveExcludeAllMisVal value="false"/>
<SaveExcludeConst value="false"/>
<SaveExcludeNearConst value="false"/>
<SaveExcludeStdDev value="false"/>
<SaveStdDevThreshold value="0.0001"/>
<SaveExcludeCorrelated value="false"/>
<SaveCorrThreshold value="0.95"/>
<SaveExclusionOptionsToVariables value="false"/>
<SaveExcludeMisMolecules value="false"/>
<SaveExcludeRejectedMolecules value="false"/>
</OPTIONS>
<DESCRIPTORS>
<block id="1" SelectAll="true"/>
<block id="2" SelectAll="true"/>
<block id="3" SelectAll="true"/>
<block id="4" SelectAll="true"/>
<block id="5" SelectAll="true"/>
<block id="6" SelectAll="true"/>
<block id="7" SelectAll="true"/>
<block id="8" SelectAll="true"/>
<block id="9" SelectAll="true"/>
<block id="10" SelectAll="true"/>
<block id="11" SelectAll="true"/>
<block id="12" SelectAll="true"/>
<block id="21" SelectAll="true"/>
<block id="22" SelectAll="true"/>
<block id="23" SelectAll="true"/>
<block id="24" SelectAll="true"/>
<block id="25" SelectAll="true"/>
<block id="28" SelectAll="true"/>
<block id="29" SelectAll="true"/>
</DESCRIPTORS>
<MOLFILES>
<molInput value="stdin"/>
<molInputFormat value="SMILES"/>
</MOLFILES>
<OUTPUT>
<SaveStdOut value="true"/>
<SaveProject value="false"/>
<SaveFile value="false"/>
<logMode value="stderr"/>
</OUTPUT>
</DRAGON>
"""
else:
raise NotImplementedError
def get_descriptors(self, mols):
"""
Parameters
----------
mols : array_like
Molecules.
"""
if not self.initialized:
self.initialize()
smiles = [self.smiles_engine.get_smiles(mol) for mol in mols]
args = ['dragon6shell', '-s', self.config_filename]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate('\n'.join(smiles))
if not stdout:
raise RuntimeError(stderr)
data, names = self.parse_descriptors(stdout)
# adjust for skipped molecules
# descriptors are in same order as smiles
missing = np.setdiff1d(smiles, names)
features = np.zeros(len(smiles), dtype=object)
idx = 0 # index into calculated features
for i, this_smiles in enumerate(smiles):
if this_smiles in missing:
features[i] = None
else:
assert this_smiles == names[idx] # confirm match
features[i] = data[idx]
idx += 1
assert len(features) == len(mols)
return features
def parse_descriptors(self, string):
"""
Parse Dragon descriptors.
Parameters
----------
string : str
Output from dragon6shell.
"""
df = pd.read_table(StringIO(string))
if self.subset == '2d':
del df['nHBonds'], df['Psi_e_1d'], df['Psi_e_1s']
# extract names
names = df['NAME'].values
# delete No. and NAME columns
del df['No.'], df['NAME']
return np.asarray(df, dtype=float), names
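# Usage sketch (assumes the dragon6shell binary is installed and on the PATH, and that
# `mols` is a sequence of molecules accepted by SmilesGenerator):
#   engine = Dragon(subset='2d')
#   features = engine.get_descriptors(mols)  # one descriptor vector (or None) per molecule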
| bsd-3-clause |
beepee14/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
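# Usage sketch (illustrative only): every estimator exported below follows the common
# scikit-learn fit/predict API, e.g.
#   from sklearn.linear_model import Ridge
#   y_pred = Ridge(alpha=1.0).fit(X_train, y_train).predict(X_test)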
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_14_evaluation_metrics/evaluate_poi_identifier.py | 1 | 2588 | #!/usr/bin/python
"""
starter code for the evaluation mini-project
start by copying your trained/tested POI identifier from
that you built in the validation mini-project
the second step toward building your POI identifier!
start by loading/formatting the data
"""
import pickle
import sys
sys.path.append("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.tree import DecisionTreeClassifier
from sklearn import cross_validation
import numpy as np
data_dict = pickle.load(open("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
features_train,features_test,labels_train,labels_test = cross_validation.train_test_split(features,labels,test_size=0.3,
random_state=42)
clf = DecisionTreeClassifier()
clf.fit(features_train,labels_train)
clf.score(features_test,labels_test)
# How many POIs are in the test set for your POI identifier?
pred = clf.predict(features_test)
sum(pred)
print len([e for e in labels_test if e == 1.0])
# How many people total are in your test set?
len(pred)
# If your identifier predicted 0. (not POI) for everyone in the test set, what would its accuracy be?
1.0 - 5.0/29
# Precision and recall can help illuminate your performance better.
# Use the precision_score and recall_score available in sklearn.metrics to compute those quantities.
# What’s the precision?
from sklearn.metrics import *
precision_score(labels_test, pred)
# What’s the recall?
recall_score(labels_test, pred)
# Here are some made-up predictions and true labels for a hypothetical test set;
# fill in the following boxes to practice identifying true positives, false positives, true negatives, and false negatives.
# Let’s use the convention that “1” signifies a positive result, and “0” a negative.
predictions = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]
true_labels = [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0]
# What's the precision of this classifier?
precision_score(true_labels, predictions)
# What's the recall of this classifier?
recall_score(true_labels, predictions)
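# Hand count for the made-up labels above, as a check on the two calls just made:
# TP = 6, FP = 3, FN = 2, TN = 9, so precision = 6/(6+3) = 0.667 and recall = 6/(6+2) = 0.75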
| mit |
latticelabs/Mitty | mitty/benchmarking/misalignment_plot.py | 1 | 9184 | """Prepare a binned matrix of misalignments and plot it in different ways"""
import click
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
import numpy as np
def we_have_too_many_bins(bins):
return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute
def autoscale_bin_size(chrom_lens, bin_cnt=100.0):
return int(sum(chrom_lens) / bin_cnt)
def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False):
"""Create a matrix of binned mis-alignments
:param bam_fp: input BAM
:param bin_size: size of bin in mega bases
:param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins
"""
def binnify(_pos, _bins):
for n in range(1, len(_bins)):
if _pos < _bins[n]:
return n - 1
return len(_bins) - 1 # Should not get here
chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']]
bin_size = bin_size * 1e6 if bin_size is not None else autoscale_bin_size(chrom_lens)
bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']]
if not i_know_what_i_am_doing and we_have_too_many_bins(bins):
raise RuntimeWarning('The number of bins will be very large. '
'If you are sure you want to do this, '
'use the --i-know-what-i-am-doing flag.')
bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins]
# Rows = source (correct pos) Cols = destination (aligned pos)
matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))]
# TAG TYPE VALUE
# XR i Aligned chromosome
# XP i Aligned pos
for r in bam_fp:
c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP')
c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom])
matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1
return chrom_lens, bins, bin_centers, matrices
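# Usage sketch (hypothetical file name; the BAM must carry the XR/XP tags described above):
#   bam_fp = pysam.AlignmentFile('aligned_reads.bam', 'rb')
#   chrom_lens, bins, bin_centers, matrices = compute_misalignment_matrix_from_bam(bam_fp, bin_size=1)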
def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05):
"""Plot the chromosomes on a circle."""
total_len = sum(chrom_lens)
radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps
theta_stops, x_ticks, x_tick_labels = [], [], []
delta_radian = 0.01
start_radian = 0
for ch_no, l in enumerate(chrom_lens):
end_radian = start_radian + l * radians_per_base
theta = np.arange(start_radian, end_radian, delta_radian)
theta_stops.append((start_radian, end_radian))
ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3])
x_ticks.append((start_radian + end_radian)/2)
x_tick_labels.append(str(ch_no + 1))
start_radian = end_radian + chrom_gap
plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels)
ax.set_rmax(r_max)
return theta_stops
def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops,
chrom_radius=1.0, scaling_factor=0.01):
scaling_factor *= 0.01
# http://matplotlib.org/users/path_tutorial.html
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j])
offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0]
range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - theta_stops[j][0]
scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest
c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest
this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0])
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
for ii in idx:
t0, t1 = this_origin[ii], this_dest[ii]
this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius
vertices = [
(t0, chrom_radius), # P0
(t0, chrom_radius - this_radius), # P1
(t1, chrom_radius - this_radius), # P2
(t1, chrom_radius), # P3
]
path = Path(vertices, codes)
patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii])
ax.add_patch(patch)
def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor):
"""Plot the confusion matrix as a circle plot."""
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
theta_stops = plot_genome_as_a_circle(ax, chrom_lens)
plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor)
def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5):
"""Plot the chromosomes on a matrix."""
start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], []
for ch_no, b in enumerate(bins):
linear_stops.append([start_pos, start_pos + b[-1]])
ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
x_ticks.append((start_pos + start_pos + b[-1]) / 2)
x_tick_labels.append(str(ch_no + 1))
start_pos += b[-1] + chrom_gap
#plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels)
return linear_stops
def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=1.0, plot_grid=True):
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j])
offset_x, offset_y = linear_stops[i][0], linear_stops[j][0]
range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0]
scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y
cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y
this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0])
if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1)
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
if idx.size > 0:
ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none')
def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True):
"""Plot the confusion matrix as a ... matrix."""
fig = plt.figure()
ax = fig.add_subplot(111)
linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1)
plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=scaling_factor, plot_grid=plot_grid)
plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned')
def is_grid_too_dense(bins):
return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show
def auto_scale_scaling_factor(matrices, scale=1000.0):
return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))])
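# Example run of the command-line interface defined below (hypothetical file names):
#   python misalignment_plot.py bad_alignments.bam --circle circle.png --matrix matrix.png --bin-size 1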
@click.command()
@click.argument('badbam', type=click.Path(exists=True))
@click.option('--circle', type=click.Path(), help='Name of figure file for circle plot')
@click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot')
@click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale')
@click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale')
@click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety')
def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing):
"""Prepare a binned matrix of mis-alignments and plot it in different ways"""
chrom_lens, bins, bin_centers, matrices = \
compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'),
bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing)
scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices)
if circle is not None:
circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor)
plt.savefig(circle)
if matrix is not None:
matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor,
plot_grid=not is_grid_too_dense(bins))
plt.savefig(matrix)
if __name__ == '__main__':
cli() | gpl-2.0 |
APMonitor/arduino | 2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py | 1 | 3283 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
import pandas as pd
# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values
#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed
# time horizon taken from the data file (one point per measurement time)
m.time = tm
# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0
K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0
K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0
tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250
tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20
# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s
Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s
# Ambient temperature
Ta = m.Param(value=23.0) # degC
# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])
# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s
# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)
# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)
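# Written out, the two heater balances above correspond to the linear 2nd-order model
#   tau12*dTH1/dt = -(TH1 - Ta) + K1*Q1 + K3*(TH2 - TH1)
#   tau12*dTH2/dt = -(TH2 - Ta) + K2*Q2 - K3*(TH2 - TH1)
# while each measured temperature lags its heater through a first-order sensor lag,
# e.g. tau3*dTC1/dt = TH1 - TC1.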
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Objective type
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle
# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()
# Create plot
plt.figure(figsize=(10,7))
ax=plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))
# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
| apache-2.0 |
miyyer/qb | qanta/hyperparam.py | 2 | 1848 | import copy
import json
import yaml
from sklearn.model_selection import ParameterGrid
def expand_config(base_file, hyper_file, output_file):
"""
    This is useful for taking the qanta.yaml config plus a set of values to try for different
    hyperparameters, and generating a configuration for each combination in the parameter sweep
"""
with open(base_file) as f:
base_conf = yaml.load(f)
with open(hyper_file) as f:
hyper_conf = yaml.load(f)
all_base_guessers = base_conf["guessers"]
final_guessers = {}
for guesser, params in hyper_conf["parameters"].items():
base_guesser_conf = all_base_guessers[guesser]
if len(base_guesser_conf) != 1:
raise ValueError(
"More than one configuration for parameter tuning base is invalid"
)
base_guesser_conf = base_guesser_conf[0]
parameter_set = set(base_guesser_conf.keys()) | set(params.keys())
param_grid = {}
for p in parameter_set:
if p in params:
param_grid[p] = params[p]
else:
param_grid[p] = [base_guesser_conf[p]]
parameter_list = list(ParameterGrid(param_grid))
final_guessers[guesser] = parameter_list
final_conf = copy.deepcopy(base_conf)
for g in final_conf["guessers"]:
if g in final_guessers:
final_conf["guessers"][g] = copy.deepcopy(final_guessers[g])
# There is a bug in yaml.dump that doesn't handle outputting nested dicts/arrays correctly. I didn't want to debug
# So instead output to json then convert that to yaml
with open("/tmp/qanta-tmp.json", "w") as f:
json.dump(final_conf, f)
with open("/tmp/qanta-tmp.json") as f:
conf = json.load(f)
with open(output_file, "w") as f:
yaml.dump(conf, f)
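# Illustrative sketch of the expansion (the guesser name, parameter names and file names
# below are hypothetical): if the base qanta.yaml holds a single configuration for a
# guesser 'ExampleGuesser' and the hyper file contains
#
#   parameters:
#     ExampleGuesser:
#       lr: [0.001, 0.01]
#       dropout: [0.1, 0.5]
#
# then
#
#   expand_config('qanta.yaml', 'hyper.yaml', 'qanta-expanded.yaml')
#
# writes a config whose ExampleGuesser entry lists the 4 combinations produced by
# sklearn's ParameterGrid; parameters not mentioned in the hyper file keep their
# single base value.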
| mit |
rizac/gfz-reportgen | gfzreport/sphinxbuild/map/__init__.py | 2 | 43603 | '''
This module implements the function `plotmap` which plots scattered points on a map
retrieved using ArgGIS Server REST API. The function is highly customizable and is basically a
wrapper around the `Basemap` library (for the map background)
plus matplotlib utilities (for plotting points, shapes, labels and legend)
Created on Mar 10, 2016
@author: riccardo
'''
import numpy as np
import re
from itertools import izip, chain
from urllib2 import URLError, HTTPError
import socket
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.basemap import Basemap
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def parse_margins(obj, parsefunc=lambda margins: [float(val) for val in margins]):
"""Parses obj returning a 4 element numpy array denoting the top, right, bottom and left
values. This function first converts obj to a 4 element list L, and then
calls `parsefunc`, which by default converts all L values into float
:param obj: either None, a number, a list of numbers (allowed lengths: 1 to 4),
a comma/semicolon/spaces separated string (e.g. "4deg 0.0", "1, 1.2", "2km,4deg", "1 ; 2")
:param parsefunc: a function to be applied to obj converted to list. By default, returns
float(v) for any v in L
:return: a 4 element numpy array of floats denoting the top, right, bottom, left values of
the margins. The idea is the same as css margins, as depicted in the table below.
:Examples:
    Given `parsefunc`, the returned value is:
============= =========================
obj is returns
============= =========================
None [0, 0, 0, 0]
------------- -------------------------
string the list obtained after
splitting string via
regexp where comma,
semicolon and spaces
are valid separators
------------- -------------------------
x or [x] parsefunc([x, x, x, x])
------------- -------------------------
[x, y] parsefunc([x, y ,x, y])
------------- -------------------------
[x, y, z] parsefunc([x, y, z, y])
------------- -------------------------
[x, y, z, t] parsefunc([x, y, z, t])
============= =========================
"""
if obj is None:
margins = [0] * 4
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
# is an iterable not string. Note the if above is py2 py3 compatible
margins = list(obj)
else:
try:
margins = [float(obj)] * 4
except (TypeError, ValueError):
margins = re.compile("(?:\\s*,\\s*|\\s*;\\s*|\\s+)").split(obj)
if len(margins) == 1:
margins *= 4
elif len(margins) == 2:
margins *= 2
elif len(margins) == 3:
margins.append(margins[1])
elif len(margins) != 4:
raise ValueError("unable to parse margins on invalid value '%s'" % obj)
return np.asarray(parsefunc(margins) if hasattr(parsefunc, "__call__") else margins)
# return margins
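# A few concrete values, following the table in the docstring above:
#
#   parse_margins(None)        # -> array([0., 0., 0., 0.])
#   parse_margins(2)           # -> array([2., 2., 2., 2.])
#   parse_margins([1, 2])      # -> array([1., 2., 1., 2.])
#   parse_margins("1, 2, 3")   # -> array([1., 2., 3., 2.])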
def parse_distance(dist, lat_0=None):
"""Returns the distance in degrees. If dist is in km or m, and lat_0 is not None,
returns w2lon, else h2lat. dist None defaults to 0
:param dist: float, int None, string. If string and has a unit, see above
"""
try:
return 0 if dist is None else float(dist)
except ValueError:
if dist[-3:].lower() == 'deg':
return float(dist[:-3])
elif dist[-2:] == 'km':
dst = 1000 * float(dist[:-2])
elif dist[-1:] == 'm':
            dst = float(dist[:-1])
else:
raise
return w2lon(dst, lat_0) if lat_0 is not None else h2lat(dst)
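# For example (values rounded): parse_distance('2deg') -> 2.0, while
# parse_distance('111.19km') -> ~1.0 (converted via h2lat) and
# parse_distance('111.19km', lat_0=60.0) -> ~2.0 (converted via w2lon at 60 deg latitude).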
def get_lon0_lat0(min_lons, min_lats, max_lons, max_lats):
""" Calculates lat_0, lon_0, i.e., the mid point of the bounding box denoted by the
arguments
:param min_lons: the minimum of longitudes
    :param min_lats: the minimum of latitudes
    :param max_lons: the maximum of longitudes
:param max_lats: the maximum of latitudes
:return: the 2-element tuple denoting the mid point lon_0, lat_0
"""
lat_0 = max_lats / 2. + min_lats / 2.
lon_0 = max_lons / 2. + min_lons / 2.
if lon_0 > 180: # FIXME: necessary?? see self.get_normalized... above
lon_0 -= 360
return lon_0, lat_0
def getbounds(min_lon, min_lat, max_lon, max_lat, margins):
"""Calculates the bounds given the bounding box identified by the arguments and
given optional margins
:param min_lon: the minimum longitude (numeric, scalar)
    :param min_lat: the minimum latitude (numeric, scalar)
    :param max_lon: the maximum longitude (numeric, scalar)
:param max_lat: the maximum latitude (numeric, scalar)
:param margins: the margins as a css-like string (with units 'deg', 'km' or 'm'), or as
a 1 to 4 element array of numeric values (in that case denoting degrees).
As in css, a 4 element array denotes the [top, right, bottom, left] values.
None defaults to [0, 0, 0, 0].
:return: the 6-element tuple denoting lon_0, lat_0, min_lon, min_lat, max_lon, max_lat.
where min_lon, min_lat, max_lon, max_lat are the new bounds and lon_0 and lat_0 are
their midpoints (x and y, respectively)
"""
def parsefunc(mrgns):
"""parses mrgns as array of strings into array of floats
"""
return parse_distance(mrgns[0]), parse_distance(mrgns[1], max_lat), \
parse_distance(mrgns[2]), parse_distance(mrgns[3], min_lat)
top, right, btm, left = parse_margins(margins, parsefunc)
min_lon, min_lat, max_lon, max_lat = min_lon-left, min_lat-btm, max_lon+right, max_lat+top
if min_lon == max_lon:
min_lon -= 10 # in degrees
max_lon += 10 # in degrees
if min_lat == max_lat:
min_lat -= 10 # in degrees
max_lat += 10 # in degrees
# minima must be within bounds:
min_lat = max(-90, min_lat)
max_lat = min(90, max_lat)
min_lon = max(-180, min_lon)
max_lon = min(180, max_lon)
lon_0, lat_0 = get_lon0_lat0(min_lon, min_lat, max_lon, max_lat)
return lon_0, lat_0, min_lon, min_lat, max_lon, max_lat
# static constant converter (degree to meters and viceversa) for latitudes
DEG2M_LAT = 2 * np.pi * 6371 * 1000 / 360
def lat2h(distance_in_degrees):
"""converts latitude distance from degrees to height in meters
:param distance_in_degrees: a distance (python scalar or numpy array) along the great circle
    expressed in degrees"""
deg2m_lat = DEG2M_LAT # 2 * np.pi * 6371 * 1000 / 360
return distance_in_degrees * deg2m_lat
def h2lat(distance_in_meters):
"""converts latitude distance from height in meters to degrees
    :param distance_in_meters: a distance (python scalar or numpy array) along the great circle
    expressed in meters"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
return distance_in_meters / deg2m_lat
def lon2w(distance_in_degrees, lat_0):
"""converts longitude distance from degrees to width in meters
:param distance_in_degrees: a distance (python scalar or numpy array)
along the lat_0 circle expressed in degrees
    :param lat_0: the latitude (in degrees) of the circle along which
    lon2w(distance_in_degrees) must be converted to meters"""
deg2m_lat = DEG2M_LAT
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_degrees * deg2m_lon
def w2lon(distance_in_meters, lat_0):
"""converts longitude distance from width in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array)
along the lat_0 circle expressed in meters
    :param lat_0: the latitude (in degrees) of the circle along which
    w2lon(distance_in_meters) must be converted to degrees"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_meters / deg2m_lon
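# Quick sanity check of the conversions above: one degree of latitude spans DEG2M_LAT
# (~111.2 km) everywhere, while one degree of longitude shrinks with latitude:
#
#   lat2h(1.0)        # -> ~111195 (meters)
#   lon2w(1.0, 0.0)   # -> ~111195 (meters, at the equator)
#   lon2w(1.0, 60.0)  # -> ~55597  (meters, at 60 degrees latitude)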
class MapHandler(object):
"""
Class handling bounds of a map given points (lons and lats)
"""
def __init__(self, lons, lats, map_margins):
"""Initializes a new MapHandler. If figure here is None, you **MUST**
call self.set_fig(fig) to calculate bounds and other stuff
when you have a ready figure"""
self.lons = lons if len(lons) else [0] # FIXME: use numpy arrays!!
self.lats = lats if len(lats) else [0]
self.max_lons, self.min_lons = max(self.lons), min(self.lons)
self.max_lats, self.min_lats = max(self.lats), min(self.lats)
self.lon_0, self.lat_0, self.llcrnrlon, self.llcrnrlat, self.urcrnrlon, self.urcrnrlat = \
getbounds(self.min_lons, self.min_lats, self.max_lons, self.max_lats, map_margins)
def _get_map_dims(self): # , fig_size_in_inches, colorbar=False):
"""Returns the map dimension width, height, in meters"""
max_lons, min_lons = self.urcrnrlon, self.llcrnrlon
max_lats, min_lats = self.urcrnrlat, self.llcrnrlat
height = lat2h(max_lats - min_lats)
width = lon2w(max_lons - min_lons, self.lat_0)
return width, height
def get_parallels(self, max_labels_count=8):
width, height = self._get_map_dims()
lat_0 = self.lat_0
N1 = int(np.ceil(height / max(width, height) * max_labels_count))
parallels = MapHandler._linspace(lat_0 - h2lat(height / 2),
lat_0 + h2lat(height / 2), N1)
return parallels
def get_meridians(self, max_labels_count=8):
width, height = self._get_map_dims()
lon_0 = self.lon_0
lat_0 = self.lat_0
N2 = int(np.ceil(width / max(width, height) * max_labels_count))
meridians = MapHandler._linspace(lon_0 - w2lon(width / 2, lat_0),
lon_0 + w2lon(width / 2, lat_0), N2)
meridians[meridians > 180] -= 360
return meridians
@staticmethod
def _linspace(val1, val2, N):
"""
returns around N 'nice' values between val1 and val2. Copied from obspy.plot_map
"""
dval = val2 - val1
round_pos = int(round(-np.log10(1. * dval / N)))
# Fake negative rounding as not supported by future as of now.
if round_pos < 0:
factor = 10 ** (abs(round_pos))
delta = round(2. * dval / N / factor) * factor / 2
else:
delta = round(2. * dval / N, round_pos) / 2
new_val1 = np.ceil(val1 / delta) * delta
new_val2 = np.floor(val2 / delta) * delta
N = (new_val2 - new_val1) / delta + 1
return np.linspace(new_val1, new_val2, N)
def _normalize(obj, size=None, dtype=None):
""""Casts" obj to a numpy array of the given optional size and optional dtype, and returns it.
If size is not None, the array must have length size. If not, and has length 1, it will be
resized to the specified size. Otherwise a ValueError is raised
    If size is None, no resize will be in place and the array is returned as it is
Note: obj=None will be converted to the array [None], apparently in the current version of numpy
this wouldn't be the default (see argument ndmin=1)
    :return: a numpy array resulting from the conversion of obj into an array
:Examples:
"""
x = np.array(obj, ndmin=1) if dtype is None else np.array(obj, ndmin=1, dtype=dtype)
if size is None:
return np.array([]) if obj is None else x # if obj is None x is [None], return [] instead
try:
if len(x) == 1:
x = np.resize(x, size)
elif len(x) != size:
raise ValueError("invalid array length: %d. Expected %d" % (len(x), size))
except (ValueError, TypeError) as _err:
raise ValueError(str(_err))
return x
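# Examples of the casting rules described in the docstring above:
#
#   _normalize(None)               # -> array([], dtype=float64)
#   _normalize(3.0, size=4)        # -> array([3., 3., 3., 3.])  (scalar broadcast)
#   _normalize([1, 2, 3], size=3)  # -> array([1, 2, 3])
#   _normalize([1, 2], size=3)     # raises ValueError (length mismatch)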
def torgba(html_str):
"""Converts html_str into a tuple of rgba colors all in [0, 1]
Curiously, matplotlib color functions do not provide this functionality for
'#RGBA' color formats
:param html_str: a valid html string in hexadecimal format.
Can have length 4, 7 or 9 such as #F1a, #fa98e3, #fc456a09
:return: a rgba vector, i.e. a 4-element numpy array of values in [0,1] denoting `html_str`
:raise: ValueError if html_str is invalid
"""
if len(html_str) not in (4, 7, 9) or not html_str[0] == '#':
raise ValueError("'%s' invalid html string" % html_str)
elif len(html_str) == 4:
rgb = [html_str[i:i+1]*2 for i in xrange(1, len(html_str))]
else:
rgb = [html_str[i:i+2] for i in xrange(1, len(html_str), 2)]
if len(rgb) == 3:
rgb += ['FF']
return np.true_divide(np.array([int(r, 16) for r in rgb]), 255)
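# For instance:
#
#   torgba('#F1a')       # -> array([1.        , 0.06666667, 0.66666667, 1.        ])
#   torgba('#fc456a09')  # -> 4-element rgba array whose alpha is 0x09/255 ~= 0.035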
def _shapeargs(lons, lats, labels, sizes, colors, markers, legend_labels):
lons = _normalize(lons, dtype=float) # basically: convert to float array if scalar (size=0)
lats = _normalize(lats, dtype=float) # basically: convert to float array if scalar (size=0)
if len(lons) != len(lats):
raise ValueError('mismatch in lengths: lons (%d) and lats (%d)' % (len(lons), len(lats)))
leng = len(lons)
labels = _normalize(labels, size=leng)
colors = _normalize(colors, size=leng)
markers = _normalize(markers, size=leng)
legend_labels = _normalize(legend_labels, size=leng)
# colors[np.isnan(colors) | (colors <= 0)] = 1.0 # nan colors default to 1 (black?)
sizes = _normalize(sizes, size=leng, dtype=float)
valid_points = np.logical_not(np.isnan(lons) | np.isnan(lats) | (sizes <= 0))
# return all points whose corresponding numeric values are not nan:
return (lons[valid_points],
lats[valid_points],
labels[valid_points],
sizes[valid_points],
colors[valid_points],
markers[valid_points],
legend_labels[valid_points])
# def get_ax_size(ax, fig):
# bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# return bbox.width, bbox.height
def pix2inch(pix, fig):
"""Converts pixel to inches on a given matplotlib figure"""
return pix / fig.dpi
def inch2pix(inch, fig):
"""Converts inches to pixel on a given matplotlib figure"""
return inch * fig.dpi
def _joinargs(key_prefix, kwargs, **already_supplied_args):
'''updates already_supplied_args with kwargs using a given prefix in kwargs to identify
common keys. Used in plotmap for kwargs'''
key_prefix += "_"
len_prefix = len(key_prefix)
already_supplied_args.update({k[len_prefix:]: v
for k, v in kwargs.iteritems() if k.startswith(key_prefix)})
return already_supplied_args
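# For example, with kwargs = {'labels_fontsize': 8, 'legend_ncol': 2}:
#
#   _joinargs('labels', kwargs, color='k')  # -> {'color': 'k', 'fontsize': 8}
#
# i.e. only the keys starting with 'labels_' are merged in, with the prefix stripped.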
def _mp_set_custom_props(drawfunc_retval, lines_props, labels_props):
"""Sets custom properties on drawparallels or drawmeridians return function.
drawfunc_retval is a dict of numbers mapped to tuples where the first element is a list of
matplotlib lines, and the second element is a list of matplotlib texts"""
_setprop(chain.from_iterable((lin for lin, lab in drawfunc_retval.itervalues())), lines_props)
_setprop(chain.from_iterable((lab for lin, lab in drawfunc_retval.itervalues())), labels_props)
def _setprop(iterator_of_mp_objects, props):
'''sets the given properties of an iterator of same type matplotlib objects'''
if not props:
return
prp = {}
for obj in iterator_of_mp_objects:
if not prp:
prp = {"set_%s" % name: val for name, val in props.iteritems()
if hasattr(obj, "set_%s" % name)}
for name, val in prp.iteritems():
getattr(obj, name)(val)
# values below CAN be None but CANNOT be arrays containing None's
def plotmap(lons,
lats,
labels=None,
legendlabels=None,
markers="o",
colors="#FF4400",
sizes=20,
cmap=None,
fontsize=None,
fontweight='regular',
fontcolor='k',
labels_h_offset=0,
labels_v_offset=0,
mapmargins='0.5deg',
figmargins=2,
arcgis_service='World_Street_Map',
arcgis_xpixels=1500,
arcgis_dpi=96,
urlfail='ignore',
maxmeridians=5,
maxparallels=5,
legend_pos='bottom',
legend_borderaxespad=1.5,
legend_ncol=1,
title=None,
show=False,
**kwargs): # @UnusedVariable
"""
Makes a scatter plot of points on a map background using ArcGIS REST API.
    :param lons: (array-like of length N or scalar) Longitudes of the data points, in degrees
    :param lats: (array-like of length N or scalar) Latitudes of the data points, in degrees
:param labels: (array-like of length N or string. Default: None, no labels) Annotations
(labels) for the individual data points on the map. If non-array (e.g. string), the same value
will be applied to all points
:param legendlabels: (array-like of length N or string. Default: None, no legend)
Annotations (labels) for the legend. You can supply a sparse array where only some points
will be displayed on the legend. All points with no legend label will not show up in the
legend
:param sizes: (array-like of length N or number. Default: 20) Sizes (in points^2) of the
individual points in the scatter plot.
:param markers: (array-like of length N,
`MarkerStyle<http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle>`_ or
string. Default: 'o' - circle) The markers (shapes) to be drawn for each point on the map.
See `markers <http://matplotlib.org/api/markers_api.html#module-matplotlib.markers>`_ for
more information on the different styles of markers scatter supports. Marker can be either
an instance of the class or the text shorthand for a particular marker.
:param colors: (array-like of length N,
`matplotlib color <http://matplotlib.org/api/colors_api.html>`_, e.g. string.
Default: "#FF4400")
    Colors for the markers (fill color). You can specify color transparency by supplying a string
    of 9 characters where the last two characters denote the transparency ('00' fully transparent,
'ff' fully opaque). Note that this is a feature not implemented in `matplotlib` colors, where
transparency is given as the last element of the numeric tuple (r, g, b, a)
:param fontsize: (numeric or None. Default: None) The fontsize for all texts drawn on the
map (labels, axis tick labels, legend). None uses the default figure font size for all. Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontweight: (string or number. Default: 'regular') The font weight for all texts drawn
on the map (labels, axis tick labels, legend). Accepts the values (see
http://matplotlib.org/api/text_api.html#matplotlib.text.Text.set_weight):
```
[a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' |
'bold' | 'heavy' | 'extra bold' | 'black' ]
```
Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontcolor: (`matplotlib color <http://matplotlib.org/api/colors_api.html>`_ or
string. Default: 'k', black) The font color for all texts drawn on the
map (labels, axis tick labels, legend). Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param labels_h_offset: (string, number. Defaults None=0) The horizontal offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels westward, positive values eastward. Useful for not overlapping
markers and labels.
    If numeric, it is assumed to be expressed in degrees. Otherwise, you can supply a string
with a number followed by one of the units 'm', 'km' or 'deg' (e.g., '5km', '0.5deg').
Note that this value affects the
`horizontalalignment` and `multialignment` properties of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_horizontalalignment` or `labels_ha` as optional argument will override
this behaviour (see `kwargs` below)
:param labels_v_offset: (string, number. Defaults None=0) The vertical offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
    labels southward, positive values northward. See notes on `labels_h_offset` for details
Note that this value affects the
`verticalalignment` property of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_verticalalignment` or `labels_va` as optional argument will override
this behaviour (see `kwargs` below)
:param mapmargins: (array-like of 1,2,3,4 elements, numeric or string, or None=0.
Default: '0.5deg').
The map margins, i.e. how much the map has to 'expand/shrink' in any direction, relative
to the bounding box calculated to include all points.
If array-like, it behaves like the css 'margin' property of html: 4 elements will denote
[top, right, bottom, left], two elements will denote [top/bottom, left/right], three
elements [top, right/left, bottom], a single element array (or a single number or a string)
applies the value to all directions.
Finally, elements of the array must be expressed as the arguments `labels_h_offset` or
`labels_v_offset`: numbers denoting degrees or strings with units 'm', 'km', 'deg'. Negative
values will shrink the map.
    If string, the argument will first be split using commas, semicolons or spaces as delimiters
(if no delimiter is found, the string is taken as a single chunk) and converted to an array-like
object.
:param figmargins: (array-like of 1,2,3,4 elements, number or None=0. Default:2) The
figure margins *in font height units* (e.g., 2 means: twice the font height). This argument
behaves exactly as `mapmargins` but expands/shrinks the distances between map and figure
(image) bounds. Useful to include axis tick labels or legend, if they overflow.
Note also that strings
are allowed only if they are parsable to float (e.g. "5,6; -12 1")
:param arcgis_service: (string, default: 'World_Street_Map'). The map image type, or
more technically the service for the map
hosted on ArcGIS server. Other values are 'ESRI_Imagery_World_2D'
(default in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_),
'World_Topo_Map', 'World_Terrain_Base'. For details, see:
http://server.arcgisonline.com/arcgis/rest/services.
    :param arcgis_xpixels: (numeric, default: 1500). Requested number of image pixels
in x-direction (default is 400 in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_).
The documentation is quite unclear but this parameter seems to set the zoom of the image. From
this `link <http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage>`_:
A bigger number will ask a bigger image, so the image will have more detail.
So when the zoom is bigger, `xsize` must be bigger to maintain the resolution
:param urlfail: (string, 'raise' or 'ignore'. Default: 'ignore'). Tells what to do if the
    ArcGIS request fails (URLError, no internet connection, etc.). By default, on failure a raw
    map with continent contours and oceans will be plotted (good for
debug). Otherwise, the exception resulting from the web request is raised
:param maxmeridians: (numeric default: 5). The number of maximum meridians to be drawn. Set to
<=0 to hide meridians. Note that also x-axis labels are drawn.
To further manipulate meridians display, use any argument starting with
'mlabels_', 'mlines_' or 'meridians' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `meridians_linewidth=0` or 'mlines_linewidth=0'.
:param maxparallels: (numeric default: 5). The number of maximum parallels to be drawn. Set to
<=0 to hide parallels. Note that also y-axis labels are drawn.
To further manipulate parallels display, use any argument starting with
'plabels_', 'plines_' or 'parallels' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `parallels_linewidth=0` or 'plines_linewidth=0'.
:param legend_pos: (string in ['top'. 'bottom', 'right', 'left'], default='bottom'). The legend
location with respect to the map. It also adjusts the bounding box that the legend will be
anchored to.
For
customizing entirely the legend placement overriding this parameter, provide `legend_loc`
(and optionally `legend_bbox_to_anchor`) in `kwargs` (see below)
:param legend_borderaxespad: (numeric, default 1.5) The pad between the axes and legend border,
in font units
:param legend_ncol: (integer, default=1) The legend number of columns
:param title (string or None. Default: None): Title above plot (Note: not tested)
:param show (boolean, default: False): Whether to show the figure after plotting or not
(Note: not tested). Can be used to do further customization of the plot before showing it.
:param fig: (matplotlib figure or None, default: None). Note: deprecated, pass None as
supplying an already existing figure with other axes might break the figure layout
:param kwargs: any kind of additional argument passed to `matplotlib` and `Basemap` functions
or objects.
The name of the argument must be of the form
```
prefix_propertyname=propertyvalue
```
where prefix indicates the function/object to be called with keyword argument:
```
propertyname=propertyvalue
```
Current supported prefixes are (for available property names see links):
Prefix Passes `propertyname` to
============ ==================================================================================
arcgis `Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>_
                 used to retrieve the background map using the ArcGIS Server REST API. See also
http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage
basemap `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap>`_
the object responsible of drawing and managing the map. Note that
`basemap_resolution=h` and `basemap_epsg=4326` by default.
labels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the point labels on the map
legend The `legend <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_.
See the already implemented arguments `legend_borderaxespad`,
`legend_ncol`
legendlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the text labels of the legend
meridians `Basemap.drawmeridians`. For more detailed settings on meridians, see
`mlines` and `mlabels`
parallels `Basemap.drawparallels`. For more detailed settings on parallels, see
`plines` and `plabels`
plines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the parallels
plabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the parallels labels on the y axis
mlines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the meridians
mlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the meridians labels on the x axis
============ ==================================================================================
Examples
--------
- `legend_title='abc'` will call `legend(..., title='abc', ...)`
- `labels_path_effects=[PathEffects.withStroke(linewidth=2, foreground='white')]` will set the
a white contour around each label text
- `meridians_labelstyle="+/-"` will call `Basemap.drawmeridians(..., labelstyle="+/-", ...)`
Notes:
------
The objects referenced by `plines`, `plabels`, `mlines`, `mlabels` and `legendlabels`
cannot be initialized directly with the given properties, which will be set after they are
created assuming that for any property `foo` passed as keyword argument in their constructor
there exist a method `set_foo(...)` (which will be called with the given propertyvalue).
This is most likely always true according to matplotlib api, but we cannot assure it works
    100% of the time
"""
lons, lats, labels, sizes, colors, markers, legendlabels =\
_shapeargs(lons, lats, labels, sizes, colors, markers, legendlabels)
    # convert html strings to tuples of rgba values in [0, 1] if the former are in string format,
# because (maybe too old matplotlib version?) colors in the format '#RGBA' are not supported
# Also, if cmap is provided, basemap.scatter calls matplotlib.scatter which
    # wants float sequences in case of a color map
if colors.dtype.char in ('U', 'S'): # pylint: disable=no-member
colors = np.array([torgba(c) for c in colors])
fig = plt.figure()
map_ax = fig.add_axes([0, 0, 1, 1]) # set axes size the same as figure
# setup handler for managing basemap coordinates and meridians / parallels calculation:
handler = MapHandler(lons, lats, mapmargins)
kwa = _joinargs('basemap', kwargs,
llcrnrlon=handler.llcrnrlon,
llcrnrlat=handler.llcrnrlat,
urcrnrlon=handler.urcrnrlon,
urcrnrlat=handler.urcrnrlat,
epsg='4326', # 4326, # 3395, # 3857,
resolution='i', # 'h',
ax=map_ax)
bmap = Basemap(**kwa)
try:
kwa = _joinargs("arcgis", kwargs, service=arcgis_service, xpixels=arcgis_xpixels,
dpi=arcgis_dpi)
# set the map image via a map service. In case you need the returned values, note that
# This function returns an ImageAxis (or AxisImage, check matplotlib doc)
bmap.arcgisimage(**kwa)
except (URLError, HTTPError, socket.error) as exc:
        # failed, maybe there is no internet connection
if urlfail == 'ignore':
# Print a simple map offline
bmap.drawcoastlines()
watercolor = '#4444bb'
bmap.fillcontinents(color='#eebb66', lake_color=watercolor)
bmap.drawmapboundary(fill_color=watercolor)
else:
raise
# draw meridians and parallels. From basemap.drawmeridians / drawparallels doc:
# returns a dictionary whose keys are the meridian values, and
# whose values are tuples containing lists of the
# matplotlib.lines.Line2D and matplotlib.text.Text instances
# associated with each meridian. Deleting an item from the
    # dictionary removes the corresponding meridian from the plot.
if maxparallels > 0:
kwa = _joinargs("parallels", kwargs, linewidth=1, fontsize=fontsize,
labels=[0, 1, 1, 0], fontweight=fontweight)
parallels = handler.get_parallels(maxparallels)
# Old basemap versions have problems with non-integer parallels.
try:
            # Note: the method below returns a list of text objects
            # representing the tick labels
_dict = bmap.drawparallels(parallels, **kwa)
except KeyError:
parallels = sorted(list(set(map(int, parallels))))
_dict = bmap.drawparallels(parallels, **kwa)
# set custom properties:
kwa_lines = _joinargs("plines", kwargs)
kwa_labels = _joinargs("plabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
if maxmeridians > 0:
kwa = _joinargs("meridians", kwargs, linewidth=1, fontsize=fontsize,
labels=[1, 0, 0, 1], fontweight=fontweight)
meridians = handler.get_meridians(maxmeridians)
_dict = bmap.drawmeridians(meridians, **kwa)
# set custom properties:
kwa_lines = _joinargs("mlines", kwargs)
kwa_labels = _joinargs("mlabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
# fig.get_axes()[0].tick_params(direction='out', length=15) # does not work, check basemap
fig.bmap = bmap
# compute the native bmap projection coordinates for events.
# from the docs (this is kind of outdated, however leave here for the moment):
# Calling a Basemap class instance with the arguments lon, lat will
# convert lon/lat (in degrees) to x/y map projection
# coordinates (in meters). If optional keyword ``inverse`` is
# True (default is False), the inverse transformation from x/y
# to lon/lat is performed.
# For cylindrical equidistant projection (``cyl``), this
# does nothing (i.e. x,y == lon,lat).
# For non-cylindrical projections, the inverse transformation
# always returns longitudes between -180 and 180 degrees. For
# cylindrical projections (self.projection == ``cyl``,
# ``cea``, ``mill``, ``gall`` or ``merc``)
# the inverse transformation will return longitudes between
# self.llcrnrlon and self.llcrnrlat.
# Input arguments lon, lat can be either scalar floats,
# sequences, or numpy arrays.
# parse hoffset and voffset and assure they are at least arrays of 1 elements
# (for aligning text labels, see below)
hoffset = np.array(parse_distance(labels_h_offset, lats), copy=False, ndmin=1)
voffset = np.array(parse_distance(labels_v_offset), copy=False, ndmin=1)
lbl_lons = lons + hoffset
lbl_lats = lats + voffset
# convert labels coordinates:
xlbl, ylbl = bmap(lbl_lons, lbl_lats)
# plot point labels
max_points = -1 # negative means: plot all
if max_points < 0 or len(lons) < max_points:
# Set alignments which control also the corner point reference when placing labels
# from (FIXME: add ref?)
# horizontalalignment controls whether the x positional argument for the text indicates
# the left, center or right side of the text bounding box.
# verticalalignment controls whether the y positional argument for the text indicates
# the bottom, center or top side of the text bounding box.
# multialignment, for newline separated strings only, controls whether the different lines
# are left, center or right justified
ha = 'left' if hoffset[0] > 0 else 'right' if hoffset[0] < 0 else 'center'
va = 'bottom' if voffset[0] > 0 else 'top' if voffset[0] < 0 else 'center'
ma = ha
kwa = _joinargs("labels", kwargs, fontweight=fontweight, color=fontcolor,
zorder=100, fontsize=fontsize, horizontalalignment=ha,
verticalalignment=va, multialignment=ma)
for name, xpt, ypt in zip(labels, xlbl, ylbl):
# Check if the point can actually be seen with the current bmap
# projection. The bmap object will set the coordinates to very
# large values if it cannot project a point.
if xpt > 1e25:
continue
map_ax.text(xpt, ypt, name, **kwa)
# plot points
x, y = bmap(lons, lats)
# store handles to points, and relative labels, if any
leg_handles, leg_labels = [], []
# bmap.scatter accepts all array-like args except markers. Avoid several useless loops
# and do only those for distinct markers:
# unique markers (sorted according to their index in markers, not their value):
mrks = markers[np.sort(np.unique(markers, return_index=True)[1])]
for mrk in mrks:
# Note using masks with '==' (numpy==1.11.3):
#
# >>> a = np.array([1,2,3])
# >>> a == 3
# array([False, False, True], dtype=bool) # OK
# >>> a == None
# False # NOT AS EXPECTED!
# >>> np.equal(a, None)
# array([False, False, False], dtype=bool) # OK
#
# (Note also that a == None issues:
# FutureWarning: comparison to `None` will result in an elementwise object
# comparison in the future.)
#
# So the correct way is to write
# mask = np.equal(array, val) if val is None else (a == val)
m_mask = np.equal(markers, mrk) if mrk is None else markers == mrk # see above
__x = x[m_mask]
__y = y[m_mask]
__m = mrk
__s = sizes[m_mask]
__c = colors[m_mask]
__l = legendlabels[m_mask]
# unique legends (sorted according to their index in __l, not their value):
for leg in __l[np.sort(np.unique(__l, return_index=True)[1])]:
l_mask = np.equal(__l, leg) if leg is None else __l == leg # see above
_scatter = bmap.scatter(__x[l_mask],
__y[l_mask],
marker=mrk,
s=__s[l_mask],
c=__c[l_mask],
cmap=cmap,
zorder=10)
if leg:
leg_handles.append(_scatter)
leg_labels.append(leg)
if leg_handles:
# if we provided `legend_loc`, use that:
loc = kwargs.get('legend_loc', None)
bbox_to_anchor = None # defaults in matplotlib legend
# we do have legend to show. Adjust legend reference corner:
if loc is None:
if legend_pos == 'bottom':
loc = 'upper center'
bbox_to_anchor = (0.5, -0.05)
elif legend_pos == 'top':
loc = 'lower center'
bbox_to_anchor = (0.5, 1.05)
elif legend_pos == 'left':
loc = 'center right'
bbox_to_anchor = (-0.05, 0.5)
elif legend_pos == 'right':
loc = 'center left'
bbox_to_anchor = (1, 0.5)
else:
raise ValueError('invalid legend_pos value:"%s"' % legend_pos)
# The plt.legend has the prop argument which sets the font properties:
# family, style, variant, weight, stretch, size, fname. See
# http://matplotlib.org/api/font_manager_api.html#matplotlib.font_manager.FontProperties
# However, that property does not allow to set font color. So we
# use the get_text method of Legend. Note that we pass font size *now* even if
# setting it later works as well (the legend frame is resized accordingly)
kwa = _joinargs("legend", kwargs, scatterpoints=1, ncol=legend_ncol, loc=loc,
bbox_to_anchor=bbox_to_anchor, borderaxespad=legend_borderaxespad,
fontsize=fontsize)
# http://stackoverflow.com/questions/17411940/matplotlib-scatter-plot-legend
leg = map_ax.legend(leg_handles, leg_labels, **kwa)
# set properties supplied via 'legend_'
_setprop(leg.get_texts(), _joinargs("legendlabels", kwargs, color=fontcolor))
# re-position the axes. The REAL map aspect ratio seems to be this:
realratio_h_w = bmap.aspect
fig_w, fig_h = fig.get_size_inches()
figratio_h_w = np.true_divide(fig_h, fig_w)
if figratio_h_w >= realratio_h_w:
# we have margins (blank space) above and below
# thus, we assume:
map_w = fig_w
# and we calculate map_h
map_h = map_w * realratio_h_w
# assume there is the same amount of space above and below:
vpad = (fig_h - map_h) / 2.0
# hpad is zero:
hpad = 0
else:
# we have margins (blank space) left and right
# thus, we assume:
map_h = fig_h
# and consequently:
map_w = map_h / realratio_h_w
        # assume there is the same amount of space left and right:
        hpad = (fig_w - map_w) / 2.0
        # vpad is zero:
        vpad = 0
# calculate new fig dimensions EXACTLY as contour of the map
new_fig_w = fig_w - 2 * hpad
new_fig_h = fig_h - 2 * vpad
# now margins:
marginz = parse_margins(figmargins) # margins are in fontheight units. Get font height:
fontsize_inch = 0
if len(np.nonzero(marginz)[0]):
# Calculate the font size in pixels.
# We want to be consistent with matplotlib way of getting fontsize.
# inspecting matplotlib.legend.Legend.draw we end up with:
# 1. Get the renderer
rend = fig.canvas.get_renderer()
# 2. get the fontsize in points. We might use `fontsize` but it might be None and we want
# the default in case. There are several 'defaults' (rcParams['font.size'],
# rcParams["legend.fontsize"])... we don't care for now, use the first. How to get
# rcParams['font.size'] ? Either this: (see at matplotlib.Legend.__init__):
# fontsize_pt = FontProperties(size=fontsize, weight=fontweight).get_size_in_points()
# or simply do:
fontsize_pt = fontsize or rcParams['font.size']
# Now use renderer to convert to pixels:
# For info see matplotlib.text.Text.get_window_extent
fontsize_px = rend.points_to_pixels(fontsize_pt)
# finally inches:
fontsize_inch = pix2inch(rend.points_to_pixels(fontsize_px), fig)
# calculate insets in inches (top right bottom left)
insets_inch = marginz * fontsize_inch
# set to fig dimensions
new_fig_w += insets_inch[1] + insets_inch[3]
new_fig_h += insets_inch[0] + insets_inch[2]
fig.set_size_inches(new_fig_w, new_fig_h, forward=True)
# (forward necessary if fig is in GUI, let's set for safety)
# now the axes which are relative to the figure. Thus first normalize inches:
insets_inch /= [fig_h, fig_w, fig_h, fig_w]
# pos1 = map_ax.get_position() # get the original position
# NOTE: it seems that pos[0], pos[1] indicate the x and y of the LOWER LEFT corner, not
# upper left!
pos2 = [insets_inch[3], insets_inch[2],
1 - (insets_inch[1] + insets_inch[3]),
1 - (insets_inch[0] + insets_inch[2])]
map_ax.set_position(pos2)
if title:
plt.suptitle(title)
if show:
plt.show()
return fig
| gpl-3.0 |
fdudatamining/framework | tests/draw/test_simple.py | 1 | 1233 | import numpy as np
import pandas as pd
from unittest import TestCase
from framework import draw
X = np.array([1, 2, 3, 4, 5])
class TestSimplePlots(TestCase):
def test_kinds(self):
self.assertIsNotNone(draw.draw_kinds)
def test_line(self):
draw.draw(clear=True, kind='line', x=X, y=X)
draw.draw(clear=True, kind='line', y=X)
def test_scatter(self):
draw.draw(clear=True, kind='scatter', x=X, y=X)
draw.draw(clear=True, kind='scatter', y=X)
def test_stem(self):
draw.draw(clear=True, kind='stem', x=X, y=X)
draw.draw(clear=True, kind='stem', y=X)
def test_errorbar(self):
draw.draw(clear=True, kind='errorbar', x=X, y=X, xerr=X, yerr=X)
draw.draw(clear=True, kind='errorbar', y=X, yerr=X)
def test_boxplot(self):
draw.draw(clear=True, kind='boxplot', x=X)
def test_barplot(self):
draw.draw(clear=True, kind='barplot', x=X, y=X, width=1)
draw.draw(clear=True, kind='barplot', x=X, y=X)
draw.draw(clear=True, kind='barplot', y=X)
def test_contour(self):
draw.draw(clear=True, kind='contour', z=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
def test_hist(self):
draw.draw(clear=True, kind='hist', x=X, bins=2)
draw.draw(clear=True, kind='hist', x=X)
| gpl-2.0 |
mantidproject/mantid | qt/python/mantidqt/gui_helper.py | 3 | 5994 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy.QtWidgets import (QApplication) # noqa
from qtpy import QtCore, QtGui
import matplotlib
import sys
import os
try:
from mantid import __version__ as __mtd_version
from mantid import _bindir as __mtd_bin_dir
# convert to major.minor
__mtd_version = '.'.join(__mtd_version.split(".")[:2])
except ImportError: # mantid not found
__mtd_version = ''
__mtd_bin_dir=''
def set_matplotlib_backend():
'''MUST be called before anything tries to use matplotlib
This will set the backend if it hasn't been already. It also returns
the name of the backend to be the name to be used for importing the
correct matplotlib widgets.'''
backend = matplotlib.get_backend()
if backend.startswith('module://'):
if backend.endswith('qt4agg'):
backend = 'Qt4Agg'
elif backend.endswith('workbench') or backend.endswith('qt5agg'):
backend = 'Qt5Agg'
else:
from qtpy import PYQT4, PYQT5 # noqa
if PYQT5:
backend = 'Qt5Agg'
elif PYQT4:
backend = 'Qt4Agg'
else:
raise RuntimeError('Do not know which matplotlib backend to set')
matplotlib.use(backend)
return backend
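# A minimal sketch of the intended call order (the widget imports are the standard
# matplotlib backend modules and may differ in your setup):
#
#   backend = set_matplotlib_backend()  # must run before anything uses matplotlib
#   if backend == 'Qt5Agg':
#       from matplotlib.backends.backend_qt5agg import FigureCanvas
#   else:
#       from matplotlib.backends.backend_qt4agg import FigureCanvas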
def get_qapplication():
''' Example usage:
app, within_mantid = get_qapplication()
reducer = eventFilterGUI.MainWindow() # the main ui class in this file
reducer.show()
if not within_mantid:
sys.exit(app.exec_())'''
app = QApplication.instance()
if app:
return app, app.applicationName().lower().startswith('mantid')
else:
return QApplication(sys.argv), False
def __to_external_url(interface_name: str, section: str, external_url: str) -> QtCore.QUrl:
if not external_url:
template = 'http://docs.mantidproject.org/nightly/interfaces/{}/{}.html'
external_url = template.format(section, interface_name)
return QtCore.QUrl(external_url)
def __to_qthelp_url(interface_name: str, section: str, qt_url: str) -> str:
if qt_url:
return qt_url
else:
template = 'qthelp://org.sphinx.mantidproject.{}/doc/interfaces/{}/{}.html'
return template.format(__mtd_version, section, interface_name)
def __get_collection_file(collection_file: str) -> str:
if not collection_file:
if not __mtd_bin_dir:
return 'HELP COLLECTION FILE NOT FOUND'
else:
collection_file = os.path.join(__mtd_bin_dir, '../docs/qthelp/MantidProject.qhc')
return os.path.abspath(collection_file)
def show_interface_help(mantidplot_name, assistant_process, area: str='',
collection_file: str='',
qt_url: str='', external_url: str=""):
''' Shows the help page for a custom interface
@param mantidplot_name: used by showCustomInterfaceHelp
@param assistant_process: needs to be started/closed from outside (see example below)
    @param collection_file: qhc collection file containing the help in the format used by qtassistant. The default is
``mantid._bindir + '../docs/qthelp/MantidProject.qhc'``
@param qt_url: location of the help in the qth file. The default value is
``qthelp://org.sphinx.mantidproject.{mtdversion}/doc/interfaces/{mantidplot_name}.html``.
@param external_url: location of external page to be displayed in the default browser. The default value is
``http://docs.mantidproject.org/nightly/interfaces/framework/{mantidplot_name}.html``
Example using defaults:
#in the __init__ function of the GUI add:
self.assistant_process = QtCore.QProcess(self)
self.mantidplot_name='DGS Planner'
#add a help function in the GUI
def help(self):
show_interface_help(self.mantidplot_name,
self.assistant_process)
#make sure you close the qtassistant when the GUI is closed
def closeEvent(self, event):
self.assistant_process.close()
self.assistant_process.waitForFinished()
event.accept()
'''
try:
# try using built-in help in mantid
import mantidqt
mantidqt.interfacemanager.InterfaceManager().showCustomInterfaceHelp(mantidplot_name, area)
except: #(ImportError, ModuleNotFoundError) raises the wrong type of error
# built-in help failed, try external qtassistant then give up and launch a browser
# cleanup previous version
assistant_process.close()
assistant_process.waitForFinished()
# where to expect qtassistant
helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
helpapp += 'assistant'
collection_file = __get_collection_file(collection_file)
if os.path.isfile(helpapp) and os.path.isfile(collection_file):
# try to find the collection file and launch qtassistant
args = ['-enableRemoteControl',
'-collectionFile', collection_file,
'-showUrl', __to_qthelp_url(mantidplot_name, area, qt_url)]
assistant_process.close()
assistant_process.waitForFinished()
assistant_process.start(helpapp, args)
else:
            # give up and open a URL in the default browser
openUrl=QtGui.QDesktopServices.openUrl
sysenv=QtCore.QProcessEnvironment.systemEnvironment()
ldp=sysenv.value('LD_PRELOAD')
if ldp:
del os.environ['LD_PRELOAD']
# create a url to the help in the default location
openUrl(__to_external_url(mantidplot_name, area, external_url))
if ldp:
os.environ['LD_PRELOAD']=ldp
| gpl-3.0 |
wateraccounting/wa | Collect/CFSR/DataAccess_CFSR.py | 1 | 8868 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/CFSR
"""
# General modules
import pandas as pd
import os
import numpy as np
from netCDF4 import Dataset
import re
from joblib import Parallel, delayed
# WA+ modules
from wa.Collect.CFSR.Download_data_CFSR import Download_data
from wa.General import data_conversions as DC
def CollectData(Dir, Var, Startdate, Enddate, latlim, lonlim, Waitbar, cores, Version):
"""
This function collects daily CFSR data in geotiff format
Keyword arguments:
Dir -- 'C:/file/to/path/'
Var -- 'dlwsfc','dswsfc','ulwsfc', or 'uswsfc'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -50 and 50)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
Waitbar -- 1 (Default) will print a wait bar
cores -- The number of cores used to run the routine.
It can be 'False' to avoid using parallel computing
routines.
Version -- 1 or 2 (1 = CFSR, 2 = CFSRv2)
"""
# Creates an array of the days of which the ET is taken
Dates = pd.date_range(Startdate,Enddate,freq = 'D')
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# For collecting CFSR data
if Version == 1:
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -89.9171038899 or latlim[1] > 89.9171038899:
print 'Latitude above 89.917N or below 89.917S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9171038899)
latlim[1] = np.minimum(latlim[1],89.9171038899)
if lonlim[0] < -180 or lonlim[1] > 179.843249782:
print 'Longitude must be between 179.84E and 179.84W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.843249782)
# Make directory for the CFSR data
output_folder=os.path.join(Dir,'Radiation','CFSR')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# For collecting CFSRv2 data
if Version == 2:
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -89.9462116040955806 or latlim[1] > 89.9462116040955806:
            print 'Latitude above 89.946N or below 89.946S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9462116040955806)
latlim[1] = np.minimum(latlim[1],89.9462116040955806)
if lonlim[0] < -180 or lonlim[1] > 179.8977275:
print 'Longitude must be between 179.90E and 179.90W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.8977275)
# Make directory for the CFSRv2 data
output_folder=os.path.join(Dir,'Radiation','CFSRv2')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Pass variables to parallel function and run
args = [output_folder, latlim, lonlim, Var, Version]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
# Remove all .nc and .grb2 files
for f in os.listdir(output_folder):
if re.search(".nc", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grb2", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grib2", f):
os.remove(os.path.join(output_folder, f))
return results
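# Example of a (hypothetical) call collecting daily downward shortwave radiation with
# CFSRv2 over Kenya, without parallel processing:
#
#   CollectData(r'C:/TestData', 'dswsfc', '2012-01-01', '2012-01-31',
#               latlim=[-5, 5], lonlim=[33, 42], Waitbar=1, cores=False, Version=2)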
def RetrieveData(Date, args):
# unpack the arguments
[output_folder, latlim, lonlim, Var, Version] = args
# Name of the model
if Version == 1:
version_name = 'CFSR'
if Version == 2:
version_name = 'CFSRv2'
# Name of the outputfile
if Var == 'dlwsfc':
Outputname = 'DLWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'dswsfc':
Outputname = 'DSWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'ulwsfc':
Outputname = 'ULWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'uswsfc':
Outputname = 'USWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
# Create the total end output name
outputnamePath = os.path.join(output_folder, Outputname)
# If the output name not exists than create this output
if not os.path.exists(outputnamePath):
local_filename = Download_data(Date, Version, output_folder, Var)
# convert grb2 to netcdf (wgrib2 module is needed)
for i in range(0,4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i+1) + '.nc'
# Total path of the output
FileNC6hour = os.path.join(output_folder, nameNC)
# Band number of the grib data which is converted in .nc
band=(int(Date.strftime('%d')) - 1) * 28 + (i + 1) * 7
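            # (the file is assumed to hold 28 grib records per day, i.e. 4 six-hourly
            #  steps of 7 records each; record (i+1)*7 of the requested day is taken)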
# Convert the data
DC.Convert_grb2_to_nc(local_filename, FileNC6hour, band)
if Version == 1:
if Date < pd.Timestamp(pd.datetime(2011, 01, 01)):
# Convert the latlim and lonlim into array
Xstart = np.floor((lonlim[0] + 180.1562497) / 0.3125)
Xend = np.ceil((lonlim[1] + 180.1562497) / 0.3125) + 1
Ystart = np.floor((latlim[0] + 89.9171038899) / 0.3122121663)
Yend = np.ceil((latlim[1] + 89.9171038899) / 0.3122121663)
# Create a new dataset
Datatot = np.zeros([576, 1152])
else:
Version = 2
if Version == 2:
# Convert the latlim and lonlim into array
Xstart = np.floor((lonlim[0] + 180.102272725) / 0.204545)
Xend = np.ceil((lonlim[1] + 180.102272725) / 0.204545) + 1
Ystart = np.floor((latlim[0] + 89.9462116040955806) / 0.204423)
Yend = np.ceil((latlim[1] + 89.9462116040955806) / 0.204423)
# Create a new dataset
Datatot = np.zeros([880, 1760])
# Open 4 times 6 hourly dataset
for i in range (0, 4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i + 1) + '.nc'
FileNC6hour = os.path.join(output_folder, nameNC)
f = Dataset(FileNC6hour, mode = 'r')
Data = f.variables['Band1'][0:int(Datatot.shape[0]), 0:int(Datatot.shape[1])]
f.close()
data = np.array(Data)
Datatot = Datatot + data
# Calculate the average in W/m^2 over the day
DatatotDay = Datatot / 4
DatatotDayEnd = np.zeros([int(Datatot.shape[0]), int(Datatot.shape[1])])
DatatotDayEnd[:,0:int(Datatot.shape[0])] = DatatotDay[:, int(Datatot.shape[0]):int(Datatot.shape[1])]
DatatotDayEnd[:,int(Datatot.shape[0]):int(Datatot.shape[1])] = DatatotDay[:, 0:int(Datatot.shape[0])]
# clip the data to the extent difined by the user
DatasetEnd = DatatotDayEnd[int(Ystart):int(Yend), int(Xstart):int(Xend)]
# save file
if Version == 1:
pixel_size = 0.3125
if Version == 2:
pixel_size = 0.204545
geo = [lonlim[0],pixel_size,0,latlim[1],0,-pixel_size]
DC.Save_as_tiff(data = np.flipud(DatasetEnd), name = outputnamePath, geo = geo, projection = "WGS84")
return()
| apache-2.0 |
hiuwo/acq4 | acq4/analysis/tools/Fitting.py | 1 | 36006 | #!/usr/bin/env python
"""
Python class wrapper for data fitting.
Includes the following external methods:
getFunctions returns the list of function names (dictionary keys)
FitRegion performs the fitting
Note that FitRegion will plot on top of the current data using MPlots routines
if the current curve and the current plot instance are passed.
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Additional Terms:
The author(s) would appreciate that any modifications to this program, or
corrections of errors, be reported to the principal author, Paul Manis, at
pmanis@med.unc.edu, with the subject line "PySounds Modifications".
Note: This program also relies on the TrollTech Qt libraries for the GUI.
You must obtain these libraries from TrollTech directly, under their license
to use the program.
"""
import sys
import numpy
import scipy
try:
import openopt
HAVE_OPENOPT = True
except ImportError:
HAVE_OPENOPT = False
print "There was an error importing openopt. Continuing...."
import ctypes
import numpy.random
#from numba import autojit
usingMPlot = False
if usingMPlot:
import MPlot # we include plotting as part of the fitting
def debug_trace():
'''Set a tracepoint in the Python debugger that works with Qt'''
if pyqt:
from PyQt4.QtCore import pyqtRemoveInputHook
from pdb import set_trace
if pyqt:
pyqtRemoveInputHook()
set_trace()
class Fitting():
# dictionary contains:
# name of function: function call, initial parameters, iterations, plot color, then x and y for testing
# target values, names of parameters, constant values, and derivative function if needed.
#
def __init__(self):
self.fitfuncmap = {
'exp0' : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.],
[1.0, 5.0], ['A0', 'tau'], None, None),
'exp1' : (self.expeval, [0.0, 0.0, 20.0], 2000, 'k', [0, 100, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, self.expevalprime),
'expsum' : (self.expsumeval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'expsum2' : (self.expsumeval2, [0., -0.5, -0.250], 50000, 'k', [0, 1000, 1.],
[0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None),
'exp2' : (self.exp2eval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'exppow' : (self.exppoweval, [0.0, 1.0, 100, ], 2000, 'k', [0, 100, 0.1],
[0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None),
'exppulse' : (self.expPulse, [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k', [0, 10, 0.3],
[0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None),
'boltz' : (self.boltzeval, [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.],
[0.00, 0.010, -100.0, 7.0], ['DC', 'A0', 'x0', 'k'], None, None),
'gauss' : (self.gausseval, [1.0, 0.0, 0.5], 2000, 'y', [-10., 10., 0.2],
[1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None),
'line' : (self.lineeval, [1.0, 0.0], 500, 'r', [-10., 10., 0.5],
[0.0, 2.0], ['m', 'b'], None, None),
'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.],
[0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None),
'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None),
'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None),
'sin' : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2],
[0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None),
'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r',
[-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0],
['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None),
'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r',
[-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0],
['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder),
}
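# Layout of each fitfuncmap entry (descriptive summary), e.g. for 'exp1':
# func[0] evaluation function (self.expeval)
# func[1] initial parameters [DC, A0, tau] = [0.0, 0.0, 20.0]
# func[2] maximum iterations / function evaluations (2000)
# func[3] plot color ('k')
# func[4] x range for testing as [start, stop, step]
# func[5] target parameter values for testing
# func[6] parameter names (['DC', 'A0', 'tau'])
# func[7] constant values passed as C (or None)
# func[8] analytic derivative function (or None)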
self.fitSum2Err = 0
def getFunctions(self):
return(self.fitfuncmap.keys())
def exp0eval(self, p, x, y=None, C = None, sumsq = False):
"""
Exponential function with an amplitude and 0 offset
"""
yd = p[0] * numpy.exp(-x/p[1])
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials with independent time constants and amplitudes,
and a DC offset
"""
yd = p[0] + (p[1]* numpy.exp(-x/p[2])) + (p[3]*numpy.exp(-x/p[4]))
if y is None:
return yd
else:
yerr = y - yd
if weights is not None:
yerr = yerr * weights
if sumsq is True:
return numpy.sum(yerr**2)
else:
return yerr
def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials, with predefined time constants , allowing
only the amplitudes and DC offset to vary
"""
yd = p[0] + (p[1]* numpy.exp(-x/C[0])) + (p[2]*numpy.exp(-x/C[1]))
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Exponential with offset
"""
yd = p[0] + p[1] * numpy.exp(-x/p[2])
# print yd.shape
# print y.shape
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Derivative for exponential with offset
"""
ydp = p[1] * numpy.exp(-x/p[2])/(p[2]*p[2])
yd = p[0] + p[1] * numpy.exp(-x/p[2])
print y
if y is None:
return (yd, ydp)
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Single exponential function, rising to a power
"""
if C is None:
cx = 1.0
else:
cx = C[0]
yd = p[0] + p[1] * (1.0-numpy.exp(-x/p[2]))**cx
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
For fit to activation currents...
"""
yd = p[0] + (p[1] * (1.0 - numpy.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - numpy.exp(-x/p[4])))
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y - yd)**2.0))
# if p[4] < 3.0*p[2]:
# ss = ss*1e6 # penalize them being too close
return ss
else:
return y - yd
# @autojit
def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None):
"""Exponential pulse function (rising exponential with optional variable-length
plateau followed by falling exponential)
Parameter p is [yOffset, t0, tau1, tau2, amp, width]
"""
yOffset, t0, tau1, tau2, amp, width = p
yd = numpy.empty(x.shape)
yd[x<t0] = yOffset
m1 = (x>=t0)&(x<(t0+width))
m2 = (x>=(t0+width))
x1 = x[m1]
x2 = x[m2]
yd[m1] = amp*(1-numpy.exp(-(x1-t0)/tau1))+yOffset
amp2 = amp*(1-numpy.exp(-width/tau1)) ## y-value at start of decay
yd[m2] = ((amp2)*numpy.exp(-(x2-(width+t0))/tau2))+yOffset
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y-yd)**2.0))
return ss
else:
return y-yd
def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + (p[1]-p[0])/(1.0 + numpy.exp((x-p[2])/p[3]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2.0))
else:
return y - yd
def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]/(1 + numpy.exp((x-p[2])/p[3])) + p[4]/(1 + numpy.exp((x-p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = (p[0]/(p[2]*numpy.sqrt(2.0*numpy.pi)))*numpy.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0)))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x + p[1]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**2.0 + p[1]*x + p[2]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def sineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]*numpy.sin((x*2.0*numpy.pi/p[2])+p[3])
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None):
"""
HH-like description of activation/inactivation function
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
yd = p[0] + 1.0/(p[1]*numpy.exp((x+p[2])/p[3]) +p[4]*numpy.exp(-(x+p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2))
else:
return y - yd
def taucurveder(self, p, x):
"""
Derivative for taucurve
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
y = -(p[1]*numpy.exp((p[2] + x)/p[3])/p[3] - p[4]*numpy.exp(-(p[5] + x)/p[6])/p[6])/(p[1]*numpy.exp((p[2] + x)/p[3]) +
p[4]*numpy.exp(-(p[5] + x)/p[6]))**2.0
# print 'dy: ', y
return y
def getClipData(self, x, y, t0, t1):
"""
Return the values in y that match the x range in tx from
t0 to t1. x must be monotonic increasing or decreasing.
Allow for reverse ordering. """
it0 = (numpy.abs(x-t0)).argmin()
it1 = (numpy.abs(x-t1)).argmin()
if it0 > it1:
t = it1
it1 = it0
it0 = t
return(x[it0:it1], y[it0:it1])
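# Illustrative example: with x = numpy.arange(0, 10), t0 = 2.5 and t1 = 7.2 the
# nearest indices are 2 and 7, so the call returns (x[2:7], y[2:7]).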
def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0 = None, t1 = None,
fitFunc = 'exp1', fitFuncDer = None, fitPars = None, fixedPars = None,
fitPlot = None, plotInstance = None, dataType= 'xy', method = None,
bounds=None, weights=None, constraints=()):
"""
**Arguments**
============= ===================================================
whichdata Indices of the records (traces) in ydat to fit
thisaxis Axis of the data block to fit when dataType is 'blocks'
tdat Time base (x data)
ydat Y data; 2D array of traces (records x samples) or a block structure
t0 (optional) Minimum of time data - determined from tdat if left unspecified
t1 (optional) Maximum of time data - determined from tdat if left unspecified
fitFunc (optional) The function to fit the data to (as defined in __init__). Default is 'exp1'.
fitFuncDer (optional) default=None
fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified.
fixedPars (optional) Fixed parameters to pass to the function. Default=None
fitPlot (optional) default=None
plotInstance (optional) default=None
dataType (optional) Options are ['xy', 'blocks']. Default='xy'
method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B', 'openopt']. Default='leastsq'
bounds (optional) default=None
weights (optional) default=None
constraints (optional) default=()
============= ===================================================
To call with tdat and ydat as simple arrays:
FitRegion(1, 0, tdat, ydat, fitFunc = 'exp1')
e.g., the first argument (whichdata) should be 1; the second (thisaxis) is ignored when dataType is 'xy'
"""
self.fitSum2Err = 0.0
if t0 == t1:
if plotInstance is not None and usingMPlot:
(x, y) = plotInstance.getCoordinates()
t0 = x[0]
t1 = x[1]
if t1 is None:
t1 = numpy.max(tdat)
if t0 is None:
t0 = numpy.min(tdat)
func = self.fitfuncmap[fitFunc]
if func is None:
print "FitRegion: unknown function %s" % (fitFunc)
return
xp = []
xf = []
yf = []
yn = []
tx = []
names = func[6]
if fitPars is None:
fpars = func[1]
else:
fpars = fitPars
if method == 'simplex': # remap calls if needed for newer versions of scipy (>= 0.11)
method = 'Nelder-Mead'
if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d': # check if 1-d, then "pretend" its only a 1-element block
nblock = 1
else:
nblock = ydat.shape[0] # otherwise, this is the number of traces in the block
# print 'datatype: ', dataType
# print 'nblock: ', nblock
# print 'whichdata: ', whichdata
# for block in range(nblock):
for record in whichdata:
if dataType == 'blocks':
(tx, dy) = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1)
else:
(tx, dy) = self.getClipData(tdat, ydat[record,:], t0, t1)
# print 'Fitting.py: block, type, Fit data: ', block, dataType
# print tx.shape
# print dy.shape
yn.append(names)
if not any(tx):
continue # no data in the window...
ier = 0
#
# Different optimization methods are included here. Not all have been tested fully with
# this wrapper.
#
if method is None or method == 'leastsq': # use standard leastsq, no bounds
plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars,
args=(tx.astype('float64'), dy.astype('float64'), fixedPars),
full_output = 1, maxfev = func[2])
if ier > 4:
print "optimize.leastsq error flag is: %d" % (ier)
print mesg
elif method == 'curve_fit':
print fpars
print fixedPars
plsq, cov = scipy.optimize.curve_fit(func[0], tx.astype('float64'), dy.astype('float64'), p0=fpars)
ier = 0
elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']: # use standard wrapper from scipy for those routintes
res = scipy.optimize.minimize(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
method=method, jac=None, hess=None, hessp=None, bounds=bounds, constraints=constraints, tol=None, callback=None,
options={'maxiter': func[2], 'disp': False })
plsq = res.x
#print " method:", method
#print " bounds:", bounds
#print " result:", plsq
# next section is replaced by the code above - kept here for reference if needed...
# elif method == 'fmin' or method == 'simplex':
# plsq = scipy.optimize.fmin(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
# maxfun = func[2]) # , iprint=0)
# ier = 0
# elif method == 'bfgs':
# plsq, cov, infodict = scipy.optimize.fmin_l_bfgs_b(func[0], fpars, fprime=func[8],
# args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True, weights),
# maxfun = func[2], bounds = bounds,
# approx_grad = True) # , disp=0, iprint=-1)
elif method == 'openopt': # use OpenOpt's routines - usually slower, but sometimes they converge better
if not HAVE_OPENOPT:
raise Exception("Requested openopt fitting method but openopt is not installed.")
if bounds is not None:
# unpack bounds
lb = [y[0] for y in bounds]
ub = [y[1] for y in bounds]
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer, lb=lb, ub=ub)
# fopt.df = func[8]
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer)
print func[8]
# fopt.df = func[7]
fopt.checkdf()
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
print 'method %s not recognized, please check Fitting.py' % (method)
return
xfit = numpy.arange(min(tx), max(tx), (max(tx)-min(tx))/100.0)
yfit = func[0](plsq, xfit, C=fixedPars)
yy = func[0](plsq, tx, C=fixedPars) # calculate function
self.fitSum2Err = numpy.sum((dy - yy)**2)
if usingMPlot and fitPlot is not None and plotInstance is not None:
self.FitPlot(xFit = xfit, yFit = yfit, fitFunc = fitFunc,
fitPars = plsq, fitPlot = fitPlot, plotInstance = plotInstance)
xp.append(plsq) # parameter list
xf.append(xfit) # x plot point list
yf.append(yfit) # y fit point list
# print xp
# print len(xp)
return(xp, xf, yf, yn) # includes names with yn and range of tx
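# Usage sketch (hypothetical data; the values below are made up): fit a single
# noisy exponential trace with the default least-squares path. ydat is passed
# as a 2D array with one trace per row and whichdata selects row 0.
# fits = Fitting()
# x = numpy.arange(0., 100., 0.1)
# trace = 1.0 + 2.5 * numpy.exp(-x / 15.0) + 0.05 * numpy.random.randn(len(x))
# fpar, xf, yf, names = fits.FitRegion(numpy.array([0]), 0, x,
# numpy.array([trace]), fitFunc='exp1')
# fpar[0] then holds the fitted [DC, A0, tau]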
def FitPlot(self, xFit = None, yFit = None, fitFunc = 'exp1',
fitPars = None, fixedPars = None, fitPlot=None, plotInstance = None,
color=None):
""" Plot the fit data onto the fitPlot with the specified "plot Instance".
if there is no xFit, or some parameters are missing, we just return.
if there is xFit, but no yFit, then we try to compute the fit with
what we have. The plot is superimposed on the specified "fitPlot" and
the color is specified by the function color in the fitPars list.
"""
if xFit is None or fitPars is None:
return
func = self.fitfuncmap[fitFunc]
if color is None:
fcolor = func[3]
else:
fcolor = color
if yFit is None:
yFit = []
for k in range(0, len(fitPars)):
yFit.append(func[0](fitPars[k], xFit[k], C=fixedPars))
if plotInstance is None or fitPlot is None:
return(yFit)
for k in range(0, len(fitPars)):
plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor)
return(yFit)
def getFitErr(self):
""" Return the fit error for the most recent fit
"""
return(self.fitSum2Err)
def expfit(self, x, y):
""" find best fit of a single exponential function to x and y
using the chebyshev polynomial approximation.
returns (DC, A, tau) for fit.
Perform a single exponential fit to data using Chebyshev polynomial method.
Equation fit: y = a1 * exp(-x/tau) + a0
Call: [a0 a1 tau] = expfit(x,y);
Calling parameter x is the time base, y is the data to be fit.
Returned values: a0 is the offset, a1 is the amplitude, tau is the time
constant (scaled in units of x).
Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the
coefficients for the integral of the data. These are now included in this
.py file source.
This version is based on the one in the pClamp manual: HOWEVER, since
I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different,
and the resulting equation for tau is different. I manually optimized the tau
estimate based on fits to some simulated noisy data. (It's ok to use the whole range of d1 and d0
when the data is clean, but only the first few coeffs really hold the info when
the data is noisy.)
NOTE: The user is responsible for making sure that the passed data is appropriate,
e.g., no large noise or electronic transients, and that the time constants in the
data are adequately sampled.
To do a double exp fit with this method is possible, but more complex.
It would be computationally simpler to try breaking the data into two regions where
the fast and slow components are dominant, and fit each separately; then use that to
seed a non-linear fit (e.g., L-M) algorithm.
Final working version 4/13/99 Paul B. Manis
converted to Python 7/9/2009 Paul B. Manis. Seems functional.
"""
n = 30; # default number of polynomials coeffs to use in fit
a = numpy.amin(x)
b = numpy.amax(x)
d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...
d1 = self.chebint(a, b, d0, n) # coeffs of integral...
tau = -numpy.mean(d1[2:3]/d0[2:3])
try:
g = numpy.exp(-x/tau)
except:
g = 0.0
dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function
# now estimate the amplitude from the ratios of the coeffs.
a1 = self.estimate(d0, dg, 1)
a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here
return(a0, a1, tau)#
def estimate(self, c, d, m):
""" compute optimal estimate of parameter from arrays of data """
n = len(c)
a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0)
return(a)
# note : the following routine is a bottleneck. It should be coded in C.
def chebftd(self, a, b, n, t, d):
""" Chebyshev fit; from Press et al, p 192.
matlab code P. Manis 21 Mar 1999
"Given a function func, lower and upper limits of the interval [a,b], and
a maximum degree, n, this routine computes the n coefficients c[1..n] such that
func(x) sum(k=1, n) of ck*Tk(y) - c0/2, where y = (x -0.5*(b+a))/(0.5*(b-a))
This routine is to be used with moderately large n (30-50); the array of c's is
subsequently truncated at the smaller value m such that cm and subsequent
terms are negligible."
This routine is modified so that we find close points in x (data array) - i.e., we find
the best Chebyshev terms to describe the data as if it is an arbitrary function.
t is the x data, d is the y data...
"""
bma = 0.5*(b-a)
bpa = 0.5*(b+a)
inc = t[1]-t[0]
f = numpy.zeros(n)
for k in range(0, n):
y = numpy.cos(numpy.pi*(k+0.5)/n)
pos = int(0.5+(y*bma+bpa)/inc)
if pos < 0:
pos = 0
if pos >= len(d)-2:
pos = len(d)-2
try:
f[k]= d[pos+1]
except:
print "error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d))
print "you should probably make sure this doesn't happen"
fac = 2.0/n
c=numpy.zeros(n)
for j in range(0, n):
sum=0.0
for k in range(0, n):
sum = sum + f[k]*numpy.cos(numpy.pi*j*(k+0.5)/n)
c[j]=fac*sum
return(c)
def chebint(self, a, b, c, n):
""" Given a, b, and c[1..n] as output from chebft or chebftd, and given n,
the desired degree of approximation (length of c to be used),
this routine computes cint, the Chebyshev coefficients of the
integral of the function whose coeffs are in c. The constant of
integration is set so that the integral vanishes at a.
Coded from Press et al, 3/21/99 P. Manis (Matlab)
Python translation 7/8/2009 P. Manis
"""
sum = 0.0
fac = 1.0
con = 0.25*(b-a) # factor that normalizes the interval
cint = numpy.zeros(n)
for j in range(1,n-2):
cint[j]=con*(c[j-1]-c[j+1])/j
sum = sum + fac * cint[j]
fac = - fac
cint[n-1] = con*c[n-2]/(n-1)
sum = sum + fac*cint[n-1]
cint[0] = 2.0*sum # set constant of integration.
return(cint)
# routine to flatten an array/list.
#
def flatten(self, l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
# run tests if we are "main"
if __name__ == "__main__":
# import matplotlib.pyplot as pyplot
import timeit
import Fitting
import matplotlib as MP
MP.use('Qt4Agg')
################## Do not modify the following code
# sets up matplotlib with sans-serif plotting...
import matplotlib.gridspec as GS
# import mpl_toolkits.axes_grid1.inset_locator as INSETS
# #import inset_axes, zoomed_inset_axes
# import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR
# # import AnchoredSizeBar
stdFont = 'Arial'
import matplotlib.pyplot as pylab
pylab.rcParams['text.usetex'] = True
pylab.rcParams['interactive'] = False
pylab.rcParams['font.family'] = 'sans-serif'
pylab.rcParams['font.sans-serif'] = 'Arial'
pylab.rcParams['mathtext.default'] = 'sf'
pylab.rcParams['figure.facecolor'] = 'white'
# next setting allows pdf font to be readable in Adobe Illustrator
pylab.rcParams['pdf.fonttype'] = 42
pylab.rcParams['text.dvipnghack'] = True
##################### to here (matplotlib stuff - touchy!
Fits = Fitting.Fitting()
# x = numpy.arange(0, 100.0, 0.1)
# y = 5.0-2.5*numpy.exp(-x/5.0)+0.5*numpy.random.randn(len(x))
# (dc, aFit,tauFit) = Fits.expfit(x,y)
# yf = dc + aFit*numpy.exp(-x/tauFit)
# pyplot.figure(1)
# pyplot.plot(x,y,'k')
# pyplot.hold(True)
# pyplot.plot(x, yf, 'r')
# pyplot.show()
exploreError = False
if exploreError is True:
# explore the error surface for a function:
func = 'exppulse'
f = Fits.fitfuncmap[func]
p1range = numpy.arange(0.1, 5.0, 0.1)
p2range = numpy.arange(0.1, 5.0, 0.1)
err = numpy.zeros((len(p1range), len(p2range)))
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
# check exchange of tau1 ([1]) and width[4]
C = None
yOffset, t0, tau1, tau2, amp, width = f[1] # get initial parameters
y0 = f[0](f[1], x, C=C)
noise = numpy.random.random(y0.shape) - 0.5
y0 += 0.0* noise
sh = err.shape
yp = numpy.zeros((sh[0], sh[1], len(y0)))
for i, p1 in enumerate(p1range):
tau1t = tau1*p1
for j, p2 in enumerate(p2range):
ampt = amp*p2
pars = (yOffset, t0, tau1t, tau2, ampt, width) # repackage
err[i,j] = f[0](pars, x, y0, C=C, sumsq = True)
yp[i,j] = f[0](pars, x, C=C, sumsq = False)
pylab.figure()
CS=pylab.contour(p1range*tau1, p2range*width, err, 25)
CB = pylab.colorbar(CS, shrink=0.8, extend='both')
pylab.figure()
for i, p1 in enumerate(p1range):
for j, p2 in enumerate(p2range):
pylab.plot(x, yp[i,j])
pylab.plot(x, y0, 'r-', linewidth=2.0)
# run tests for each type of fit, return results to compare parameters
cons = None
bnds = None
signal_to_noise = 100000.
for func in Fits.fitfuncmap:
if func != 'exppulse':
continue
print "\nFunction: %s\nTarget: " % (func),
f = Fits.fitfuncmap[func]
for k in range(0,len(f[1])):
print "%f " % (f[1][k]),
print "\nStarting: ",
for k in range(0,len(f[5])):
print "%f " % (f[5][k]),
# nstep = 500.0
# if func == 'sin':
# nstep = 100.0
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
if func == 'exppulse':
C = f[7]
y = f[0](f[1], x, C=C)
yd = numpy.array(y)
noise = numpy.random.normal(0, 0.1, yd.shape)
my = numpy.amax(yd)
#yd = yd + sigmax*0.05*my*(numpy.random.random_sample(shape(yd))-0.5)
yd += noise*my/signal_to_noise
testMethod = 'SLSQP'
if func == 'taucurve':
continue
bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0),
(0., 1000), (0.0, 500.0), (0.1, 50.0)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'boltz':
continue
bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exp2':
bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0),
(1., 10000.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exppulse':
# set some constraints to the fitting
# yOffset, tau1, tau2, amp, width = f[1] # order of constraints
dt = numpy.mean(numpy.diff(x))
bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)]
# example for constraints:
# cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]},
# {'type': 'ineq', 'fun': lambda x: x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 2000},
# )
cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] }, # tau1 < tau2
)
C = None
tv = f[5]
initialgr = f[0](f[5], x, None )
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod)
# print xf
# print yf
# print fpar
# print names
else:
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod)
#print fpar
s = numpy.shape(fpar)
j = 0
outstr = ""
initstr = ""
truestr = ""
for i in range(0, len(names[j])):
# print "%f " % fpar[j][i],
outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i]))
initstr = initstr + '%s = %f, ' % (names[j][i], tv[i])
truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i])
print( "\nTrue(%d) : %s" % (j, truestr) )
print( "FIT(%d) : %s" % (j, outstr) )
print( "init(%d) : %s" % (j, initstr) )
print( "Error: : %f" % (Fits.fitSum2Err))
if func == 'exppulse':
pylab.figure()
pylab.plot(numpy.array(x), yd, 'ro-')
pylab.hold(True)
pylab.plot(numpy.array(x), initialgr, 'k--')
pylab.plot(xf[0], yf[0], 'b-') # fit
pylab.show()
| mit |
dingmingliu/quanttrade | bt/core.py | 1 | 37660 | """
Contains the core building blocks of the framework.
"""
import math
from copy import deepcopy
import pandas as pd
import numpy as np
import cython as cy
class Node(object):
"""
The Node is the main building block in bt's tree structure design.
Both StrategyBase and SecurityBase inherit Node. It contains the
core functionality of a tree node.
Args:
* name (str): The Node name
* parent (Node): The parent Node
* children (dict, list): A collection of children. If dict,
the format is {name: child}, if list then list of children.
Attributes:
* name (str): Node name
* parent (Node): Node parent
* root (Node): Root node of the tree (topmost node)
* children (dict): Node's children
* now (datetime): Used when backtesting to store current date
* stale (bool): Flag used to determine if Node is stale and needs
updating
* prices (TimeSeries): Prices of the Node. Prices for a security will
be the security's price, for a strategy it will be an index that
reflects the value of the strategy over time.
* price (float): last price
* value (float): last value
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Node + node's children
"""
_price = cy.declare(cy.double)
_value = cy.declare(cy.double)
_weight = cy.declare(cy.double)
_issec = cy.declare(cy.bint)
_has_strat_children = cy.declare(cy.bint)
def __init__(self, name, parent=None, children=None):
self.name = name
# strategy children helpers
self._has_strat_children = False
self._strat_children = []
# if children is not None, we assume that we want to limit the
# available children space to the provided list.
if children is not None:
if isinstance(children, list):
# if all strings - just save as universe_filter
if all(isinstance(x, str) for x in children):
self._universe_tickers = children
# empty dict - don't want to uselessly create
# tons of children when they might not be needed
children = {}
else:
# this will be case if we pass in children
# (say a bunch of sub-strategies)
tmp = {}
ut = []
for c in children:
if type(c) == str:
tmp[c] = SecurityBase(c)
ut.append(c)
else:
# deepcopy object for possible later reuse
tmp[c.name] = deepcopy(c)
# if strategy, turn on flag and add name to list
# strategy children have special treatment
if isinstance(c, StrategyBase):
self._has_strat_children = True
self._strat_children.append(c.name)
# if not strategy, then we will want to add this to
# universe_tickers to filter on setup
else:
ut.append(c.name)
children = tmp
# limit the universe to the tickers/strategy names collected above
self._universe_tickers = ut
if parent is None:
self.parent = self
self.root = self
else:
self.parent = parent
self.root = parent.root
parent._add_child(self)
# default children
if children is None:
children = {}
self._universe_tickers = None
self.children = children
self._childrenv = children.values()
for c in self._childrenv:
c.parent = self
c.root = self.root
# set default value for now
self.now = 0
# make sure root has stale flag
# used to avoid unnecessary update
# sometimes we change values in the tree and we know that we will need
# to update if another node tries to access a given value (say weight).
# This avoids calling the update until it is actually needed.
self.root.stale = False
# helper vars
self._price = 0
self._value = 0
self._weight = 0
# is security flag - used to avoid updating 0 pos securities
self._issec = False
def __getitem__(self, key):
return self.children[key]
@property
def prices(self):
"""
A TimeSeries of the Node's price.
"""
# can optimize depending on type -
# securities don't need to check stale to
# return latest prices, whereas strategies do...
raise NotImplementedError()
@property
def price(self):
"""
Current price of the Node
"""
# can optimize depending on type -
# securities don't need to check stale to
# return latest prices, whereas strategies do...
raise NotImplementedError()
@property
def value(self):
"""
Current value of the Node
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._value
@property
def weight(self):
"""
Current weight of the Node (with respect to the parent).
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._weight
def setup(self, dates):
"""
Setup method used to initialize a Node with a set of dates.
"""
raise NotImplementedError()
def _add_child(self, child):
child.parent = self
child.root = self.root
if self.children is None:
self.children = {child.name: child}
else:
self.children[child.name] = child
self._childrenv = self.children.values()
def update(self, date, data=None, inow=None):
"""
Update Node with latest date, and optionally some data.
"""
raise NotImplementedError()
def adjust(self, amount, update=True, isflow=True):
"""
Adjust Node value by amount.
"""
raise NotImplementedError()
def allocate(self, amount, update=True):
"""
Allocate capital to Node.
"""
raise NotImplementedError()
@property
def members(self):
"""
Node members. Members include current node as well as Node's
children.
"""
res = [self]
for c in self.children.values():
res.extend(c.members)
return res
@property
def full_name(self):
if self.parent == self:
return self.name
else:
return '%s>%s' % (self.parent.full_name, self.name)
class StrategyBase(Node):
"""
Strategy Node. Used to define strategy logic within a tree.
A Strategy's role is to allocate capital to it's children
based on a function.
Args:
* name (str): Strategy name
* children (dict, list): A collection of children. If dict,
the format is {name: child}, if list then list of children.
Children can be any type of Node.
* parent (Node): The parent Node
Attributes:
* name (str): Strategy name
* parent (Strategy): Strategy parent
* root (Strategy): Root node of the tree (topmost node)
* children (dict): Strategy's children
* now (datetime): Used when backtesting to store current date
* stale (bool): Flag used to determine if Strategy is stale and needs
updating
* prices (TimeSeries): Prices of the Strategy - basically an index that
reflects the value of the strategy over time.
* price (float): last price
* value (float): last value
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Strategy + strategy's children
* commission_fn (fn(quantity, price)): A function used to determine the
commission (transaction fee) amount. Could be used to model slippage
(implementation shortfall). Note that often fees are symmetric for
buy and sell and absolute value of quantity should be used for
calculation.
* capital (float): Capital amount in Strategy - cash
* universe (DataFrame): Data universe available at the current time.
Universe contains the data passed in when creating a Backtest. Use
this data to determine strategy logic.
"""
_capital = cy.declare(cy.double)
_net_flows = cy.declare(cy.double)
_last_value = cy.declare(cy.double)
_last_price = cy.declare(cy.double)
_last_fee = cy.declare(cy.double)
_paper_trade = cy.declare(cy.bint)
bankrupt = cy.declare(cy.bint)
def __init__(self, name, children=None, parent=None):
Node.__init__(self, name, children=children, parent=parent)
self._capital = 0
self._weight = 1
self._value = 0
self._price = 100
# helper vars
self._net_flows = 0
self._last_value = 0
self._last_price = 100
self._last_fee = 0
# default commission function
self.commission_fn = self._dflt_comm_fn
self._paper_trade = False
self._positions = None
self.bankrupt = False
@property
def price(self):
"""
Current price.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._price
@property
def prices(self):
"""
TimeSeries of prices.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._prices.ix[:self.now]
@property
def values(self):
"""
TimeSeries of values.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._values.ix[:self.now]
@property
def capital(self):
"""
Current capital - amount of unallocated capital left in strategy.
"""
# no stale check needed
return self._capital
@property
def cash(self):
"""
TimeSeries of unallocated capital.
"""
# no stale check needed
return self._cash
@property
def fees(self):
"""
TimeSeries of fees.
"""
# no stale check needed
return self._fees
@property
def universe(self):
"""
Data universe available at the current time.
Universe contains the data passed in when creating a Backtest.
Use this data to determine strategy logic.
"""
# avoid windowing every time
# if calling and on same date return
# cached value
if self.now == self._last_chk:
return self._funiverse
else:
self._last_chk = self.now
self._funiverse = self._universe.ix[:self.now]
return self._funiverse
@property
def positions(self):
"""
TimeSeries of positions.
"""
# if accessing and stale - update first
if self.root.stale:
self.root.update(self.root.now, None)
if self._positions is not None:
return self._positions
else:
vals = pd.DataFrame({x.name: x.positions for x in self.members
if isinstance(x, SecurityBase)})
self._positions = vals
return vals
def setup(self, universe):
"""
Setup strategy with universe. This will speed up future calculations
and updates.
"""
# save full universe in case we need it
self._original_data = universe
# determine if needs paper trading
# and setup if so
if self is not self.parent:
self._paper_trade = True
self._paper_amount = 1000000
paper = deepcopy(self)
paper.parent = paper
paper.root = paper
paper._paper_trade = False
paper.setup(self._original_data)
paper.adjust(self._paper_amount)
self._paper = paper
# setup universe
funiverse = universe
if self._universe_tickers is not None:
# if we have universe_tickers defined, limit universe to
# those tickers
valid_filter = list(set(universe.columns)
.intersection(self._universe_tickers))
funiverse = universe[valid_filter].copy()
# if we have strat children, we will need to create their columns
# in the new universe
if self._has_strat_children:
for c in self._strat_children:
funiverse[c] = np.nan
# must create to avoid pandas warning
funiverse = pd.DataFrame(funiverse)
self._universe = funiverse
# holds filtered universe
self._funiverse = funiverse
self._last_chk = None
# We're not bankrupt yet
self.bankrupt = False
# setup internal data
self.data = pd.DataFrame(index=funiverse.index,
columns=['price', 'value', 'cash', 'fees'],
data=0.0)
self._prices = self.data['price']
self._values = self.data['value']
self._cash = self.data['cash']
self._fees = self.data['fees']
# setup children as well - use original universe here - don't want to
# pollute with potential strategy children in funiverse
if self.children is not None:
[c.setup(universe) for c in self._childrenv]
@cy.locals(newpt=cy.bint, val=cy.double, ret=cy.double)
def update(self, date, data=None, inow=None):
"""
Update strategy. Updates prices, values, weight, etc.
"""
# resolve stale state
self.root.stale = False
# update helpers on date change
# also set newpt flag
newpt = False
if self.now == 0:
newpt = True
elif date != self.now:
self._net_flows = 0
self._last_price = self._price
self._last_value = self._value
self._last_fee = 0.0
newpt = True
# update now
self.now = date
if inow is None:
if self.now == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# update children if any and calculate value
val = self._capital # default if no children
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
c.update(date, data, inow)
val += c.value
if self.root == self:
if (val < 0) and not self.bankrupt:
# Declare a bankruptcy
self.bankrupt = True
self.flatten()
# update data if this value is different or
# if now has changed - avoid all this if not since it
# won't change
if newpt or self._value != val:
self._value = val
self._values.values[inow] = val
try:
ret = self._value / (self._last_value
+ self._net_flows) - 1
except ZeroDivisionError:
if self._value == 0:
ret = 0
else:
raise ZeroDivisionError(
'Could not update %s. Last value '
'was %s and net flows were %s. Current '
'value is %s. Therefore, '
'we are dividing by zero to obtain the return '
'for the period.' % (self.name,
self._last_value,
self._net_flows,
self._value))
self._price = self._last_price * (1 + ret)
self._prices.values[inow] = self._price
# update children weights
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
try:
c._weight = c.value / val
except ZeroDivisionError:
c._weight = 0.0
# if we have strategy children, we will need to update them in universe
if self._has_strat_children:
for c in self._strat_children:
# TODO: optimize ".loc" here as well
self._universe.loc[date, c] = self.children[c].price
# Cash should track the unallocated capital at the end of the day, so
# we should update it every time we call "update".
# Same for fees
self._cash.values[inow] = self._capital
self._fees.values[inow] = self._last_fee
# update paper trade if necessary
if newpt and self._paper_trade:
self._paper.update(date)
self._paper.run()
self._paper.update(date)
# update price
self._price = self._paper.price
self._prices.values[inow] = self._price
@cy.locals(amount=cy.double, update=cy.bint, flow=cy.bint, fees=cy.double)
def adjust(self, amount, update=True, flow=True, fee=0.0):
"""
Adjust capital - used to inject capital to a Strategy. This injection
of capital will have no effect on the children.
Args:
* amount (float): Amount to adjust by.
* update (bool): Force update?
* flow (bool): Is this adjustment a flow? A flow does not affect the
price index: it is backed out of the return calculation. Capital
injections/withdrawals are flows; commissions and fees are not.
"""
# adjust capital
self._capital += amount
self._last_fee += fee
# if flow - increment net_flows - this will not affect
# performance. Commissions and other fees are not flows since
# they have a performance impact
if flow:
self._net_flows += amount
if update:
# indicates that data is now stale and must
# be updated before access
self.root.stale = True
@cy.locals(amount=cy.double, update=cy.bint)
def allocate(self, amount, child=None, update=True):
"""
Allocate capital to Strategy. By default, capital is allocated
recursively down the children, proportionally to the children's
weights. If a child is specified, capital will be allocated
to that specific child.
Allocations also have a side-effect. They will deduct the same amount
from the parent's "account" to offset the allocation. If there is
remaining capital after allocation, it will remain in Strategy.
Args:
* amount (float): Amount to allocate.
* child (str): If specified, allocation will be directed to child
only. Specified by name.
* update (bool): Force update.
"""
# allocate to child
if child is not None:
if child not in self.children:
c = SecurityBase(child)
c.setup(self._universe)
# update to bring up to speed
c.update(self.now)
# add child to tree
self._add_child(c)
# allocate to child
self.children[child].allocate(amount)
# allocate to self
else:
# adjust parent's capital
# no need to update now - avoids repetition
if self.parent == self:
self.parent.adjust(-amount, update=False, flow=True)
else:
# do NOT set as flow - parent will be another strategy
# and therefore should not incur flow
self.parent.adjust(-amount, update=False, flow=False)
# adjust self's capital
self.adjust(amount, update=False, flow=True)
# push allocation down to children if any
# use _weight to avoid triggering an update
if self.children is not None:
[c.allocate(amount * c._weight, update=False)
for c in self._childrenv]
# mark as stale if update requested
if update:
self.root.stale = True
@cy.locals(delta=cy.double, weight=cy.double, base=cy.double)
def rebalance(self, weight, child, base=np.nan, update=True):
"""
Rebalance a child to a given weight.
This is a helper method to simplify code logic. This method is used
when we want to set the weight of a particular child to a given amount.
It is similar to allocate, but it calculates the appropriate allocation
based on the current weight.
Args:
* weight (float): The target weight. Usually between -1.0 and 1.0.
* child (str): child to allocate to - specified by name.
* base (float): If specified, this is the base amount all weight
delta calculations will be based off of. This is useful when we
determine a set of weights and want to rebalance each child
given these new weights. However, as we iterate through each
child and call this method, the base (which is by default the
current value) will change. Therefore, we can set this base to
the original value before the iteration to ensure the proper
allocations are made.
* update (bool): Force update?
"""
# if weight is 0 - we want to close child
if weight == 0:
if child in self.children:
return self.close(child)
else:
return
# if no base specified use self's value
if np.isnan(base):
base = self.value
# else make sure we have child
if child not in self.children:
c = SecurityBase(child)
c.setup(self._universe)
# update child to bring up to speed
c.update(self.now)
self._add_child(c)
# allocate to child
# figure out weight delta
c = self.children[child]
delta = weight - c.weight
c.allocate(delta * base)
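# Worked example (made-up numbers): if the strategy value is 1000 and child
# 'foo' currently has weight 0.25, rebalance(0.40, 'foo') computes
# delta = 0.15 and allocates 0.15 * 1000 = 150 of additional capital to 'foo'.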
def close(self, child):
"""
Close a child position - alias for rebalance(0, child). This will also
flatten (close out all) the child's children.
Args:
* child (str): Child, specified by name.
"""
c = self.children[child]
# flatten if children not None
if c.children is not None and len(c.children) != 0:
c.flatten()
c.allocate(-c.value)
def flatten(self):
"""
Close all child positions.
"""
# go right to base alloc
[c.allocate(-c.value) for c in self._childrenv if c.value != 0]
def run(self):
"""
This is the main logic method. Override this method to provide some
algorithm to execute on each date change. This method is called by
backtester.
"""
pass
def set_commissions(self, fn):
"""
Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount.
"""
self.commission_fn = fn
for c in self._childrenv:
if isinstance(c, StrategyBase):
c.set_commissions(fn)
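# Illustrative sketch (hypothetical strategy instance s): charge 0.5% of traded
# notional instead of the default fee:
# s.set_commissions(lambda q, p: 0.005 * abs(q) * p)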
@cy.locals(q=cy.double, p=cy.double)
def _dflt_comm_fn(self, q, p):
return max(1, abs(q) * 0.01)
class SecurityBase(Node):
"""
Security Node. Used to define a security within a tree.
A Security has no children. It simply models an asset that can be bought
or sold.
Args:
* name (str): Security name
* multiplier (float): security multiplier - typically used for
derivatives.
Attributes:
* name (str): Security name
* parent (Security): Security parent
* root (Security): Root node of the tree (topmost node)
* now (datetime): Used when backtesting to store current date
* stale (bool): Flag used to determine if Security is stale and needs
updating
* prices (TimeSeries): Security prices.
* price (float): last price
* value (float): last value - basically position * price * multiplier
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Security + strategy's children
* position (float): Current position (quantity).
"""
_last_pos = cy.declare(cy.double)
_position = cy.declare(cy.double)
multiplier = cy.declare(cy.double)
_prices_set = cy.declare(cy.bint)
_needupdate = cy.declare(cy.bint)
@cy.locals(multiplier=cy.double)
def __init__(self, name, multiplier=1):
Node.__init__(self, name, parent=None, children=None)
self._value = 0
self._price = 0
self._weight = 0
self._position = 0
self.multiplier = multiplier
# opt
self._last_pos = 0
self._issec = True
self._needupdate = True
@property
def price(self):
"""
Current price.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._price
@property
def prices(self):
"""
TimeSeries of prices.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._prices.ix[:self.now]
@property
def values(self):
"""
TimeSeries of values.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
if self.root.stale:
self.root.update(self.root.now, None)
return self._values.ix[:self.now]
@property
def position(self):
"""
Current position
"""
# no stale check needed
return self._position
@property
def positions(self):
"""
TimeSeries of positions.
"""
# if accessing and stale - update first
if self._needupdate:
self.update(self.root.now)
if self.root.stale:
self.root.update(self.root.now, None)
return self._positions.ix[:self.now]
def setup(self, universe):
"""
Setup Security with universe. Speeds up future runs.
Args:
* universe (DataFrame): DataFrame of prices with security's name as
one of the columns.
"""
# if we already have all the prices, we will store them to speed up
# future updates
try:
prices = universe[self.name]
except KeyError:
prices = None
# setup internal data
if prices is not None:
self._prices = prices
self.data = pd.DataFrame(index=universe.index,
columns=['value', 'position'],
data=0.0)
self._prices_set = True
else:
self.data = pd.DataFrame(index=universe.index,
columns=['price', 'value', 'position'])
self._prices = self.data['price']
self._prices_set = False
self._values = self.data['value']
self._positions = self.data['position']
@cy.locals(prc=cy.double)
def update(self, date, data=None, inow=None):
"""
Update security with a given date and optionally, some data.
This will update price, value, weight, etc.
"""
# filter for internal calls when position has not changed - nothing to
# do. Internal calls (stale root calls) have None data. Also want to
# make sure date has not changed, because then we do indeed want to
# update.
if date == self.now and self._last_pos == self._position:
return
if inow is None:
if date == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# date change - update price
if date != self.now:
# update now
self.now = date
if self._prices_set:
self._price = self._prices.values[inow]
# traditional data update
elif data is not None:
prc = data[self.name]
self._price = prc
self._prices.values[inow] = prc
self._positions.values[inow] = self._position
self._last_pos = self._position
self._value = self._position * self._price * self.multiplier
self._values.values[inow] = self._value
if self._weight == 0 and self._position == 0:
self._needupdate = False
@cy.locals(amount=cy.double, update=cy.bint, q=cy.double, outlay=cy.double)
def allocate(self, amount, update=True):
"""
This allocates capital to the Security. This is the method used to
buy/sell the security.
A given number of shares will be determined based on the current price, a
commission will be calculated based on the parent's commission fn, and
any remaining capital will be passed back up to parent as an
adjustment.
Args:
* amount (float): Amount of adjustment.
* update (bool): Force update?
"""
# will need to update if this has been idle for a while...
# update if needupdate or if now is stale
# fetch parent's now since our now is stale
if self._needupdate or self.now != self.parent.now:
self.update(self.parent.now)
# ignore 0 alloc
# Note that if the price of the security has dropped to zero, then it should
# never be selected by SelectAll, SelectN etc. I.e. we should not open
# the position at zero price. At the same time, we are able to close
# it at zero price, because at that point amount=0.
# Note also that we don't erase the position in an asset whose price has
# dropped to zero (though the weight will indeed be = 0)
if amount == 0:
return
if self.parent is self or self.parent is None:
raise Exception(
'Cannot allocate capital to a parentless security')
if self._price == 0 or np.isnan(self._price):
raise Exception(
'Cannot allocate capital to '
'%s because price is 0 or nan as of %s'
% (self.name, self.parent.now))
# buy/sell
# determine quantity - must also factor in commission
# closing out?
if amount == -self._value:
q = -self._position
else:
if (self._position > 0) or ((self._position == 0) and (amount > 0)):
# if we're going long or changing long position
q = math.floor(amount / (self._price * self.multiplier))
else:
# if we're going short or changing short position
q = math.ceil(amount / (self._price * self.multiplier))
# if q is 0 nothing to do
if q == 0 or np.isnan(q):
return
# this security will need an update, even if pos is 0 (for example if
# we close the positions, value and pos is 0, but still need to do that
# last update)
self._needupdate = True
# adjust position & value
self._position += q
# calculate proper adjustment for parent
# parent passed down amount so we want to pass
# -outlay back up to parent to adjust for capital
# used
outlay, fee = self.outlay(q)
# call parent
self.parent.adjust(-outlay, update=update, flow=False, fee=fee)
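# Worked example (made-up numbers): allocating 1050.0 at price 10.0 with
# multiplier 1 and a flat position buys q = floor(1050 / 10) = 105 shares;
# with the default commission max(1, 0.01 * abs(q)) = 1.05 the outlay passed
# back to the parent is 105 * 10 + 1.05 = 1051.05.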
@cy.locals(q=cy.double, p=cy.double)
def commission(self, q, p):
"""
Calculates the commission (transaction fee) based on quantity and price.
Uses the parent's commission_fn.
Args:
* q (float): quantity
* p (float): price
"""
return self.parent.commission_fn(q, p)
@cy.locals(q=cy.double)
def outlay(self, q):
"""
Determines the complete cash outlay (including commission) necessary
given a quantity q.
Second returning parameter is a commission itself.
Args:
* q (float): quantity
"""
fee = self.commission(q, self._price * self.multiplier)
full_outlay = q * self._price * self.multiplier + fee
return full_outlay, fee
def run(self):
"""
Does nothing - securities have nothing to do on run.
"""
pass
class Algo(object):
"""
Algos are used to break strategy logic into small pieces so that it becomes
modular, composable, more testable and less error prone. Basically, the
Algo should follow the unix philosophy - do one thing well.
In practice, algos are simply functions that receive one argument, the
Strategy (referred to as target), and are expected to return a bool.
When some state preservation is necessary between calls, the Algo
object can be used (this object). The __call__ method should be
implemented and logic defined therein to mimic a function call. A
simple function may also be used if no state preservation is necessary.
Args:
* name (str): Algo name
"""
def __init__(self, name=None):
self._name = name
@property
def name(self):
"""
Algo name.
"""
if self._name is None:
self._name = self.__class__.__name__
return self._name
def __call__(self, target):
raise NotImplementedError("%s not implemented!" % self.name)
class AlgoStack(Algo):
"""
An AlgoStack derives from Algo and runs multiple Algos until a
failure is encountered.
The purpose of an AlgoStack is to group a logical set of Algos together. Each
Algo in the stack is run. Execution stops if one Algo returns False.
Args:
* algos (list): List of algos.
"""
def __init__(self, *algos):
super(AlgoStack, self).__init__()
self.algos = algos
self.check_run_always = any(hasattr(x, 'run_always')
for x in self.algos)
def __call__(self, target):
# normal running mode
if not self.check_run_always:
for algo in self.algos:
if not algo(target):
return False
return True
# run mode when at least one algo has a run_always attribute
else:
# store result in res
# allows continuation to check for and run
# algos that have run_always set to True
res = True
for algo in self.algos:
if res:
res = algo(target)
elif hasattr(algo, 'run_always'):
if algo.run_always:
algo(target)
return res
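# Minimal sketch (illustrative): plain callables taking the strategy ("target")
# and returning a bool work as algos, e.g.
# stack = AlgoStack(lambda target: True,
# lambda target: target.value >= 0,
# lambda target: True)
# stack(strategy) runs the algos in order and stops at the first False.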
class Strategy(StrategyBase):
"""
Strategy expands on the StrategyBase and incorporates Algos.
Basically, a Strategy is built by passing in a set of algos. These algos
will be placed in an Algo stack and the run function will call the stack.
Furthermore, two class attributes are created to pass data between algos.
perm for permanent data, temp for temporary data.
Args:
* name (str): Strategy name
* algos (list): List of Algos to be passed into an AlgoStack
* children (dict, list): Children - useful when you want to create
strategies of strategies
Attributes:
* stack (AlgoStack): The stack
* temp (dict): A dict containing temporary data - cleared on each call
to run. This can be used to pass info to other algos.
* perm (dict): Permanent data used to pass info from one algo to
another. Not cleared on each pass.
"""
def __init__(self, name, algos=[], children=None):
super(Strategy, self).__init__(name, children=children)
self.stack = AlgoStack(*algos)
self.temp = {}
self.perm = {}
def run(self):
# clear out temp data
self.temp = {}
# run algo stack
self.stack(self)
# run children
for c in self.children.values():
c.run()
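# Minimal usage sketch (hypothetical ticker 'foo'; dates and prices made up):
# dates = pd.date_range('2015-01-01', periods=3)
# universe = pd.DataFrame({'foo': [100.0, 105.0, 95.0]}, index=dates)
# s = StrategyBase('parent')
# s.setup(universe)
# s.adjust(1000.0) # inject starting capital
# s.update(dates[0])
# s.allocate(500.0, child='foo') # creates a SecurityBase child and buys 5 shares
# s.value, s.capital, s['foo'].position # -> 999.0, 499.0, 5.0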
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
rnowling/pop-gen-models | single-pop/single_pop.py | 1 | 3379 | import sys
import numpy as np
import numpy.random as npr
from sklearn.neighbors.kde import KernelDensity
from scipy.special import gammaln
import matplotlib.pyplot as plt
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
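# Worked example (illustrative, not in the original source): for xs = [2, 1, 1]
# and ps = [0.5, 0.25, 0.25], the multinomial coefficient is 4!/(2!*1!*1!) = 12
# and the probability is 12 * 0.5**2 * 0.25 * 0.25 = 0.1875, so log_multinomial
# returns roughly log(0.1875) ~ -1.674 (up to the small epsilon added to ps).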
class KDE_MCMC_Sampler(object):
def __init__(self, observed_counts):
"""
Observed counts is a 3D matrix indexed by (locus, population, haplotype)
"""
self.observed_counts = observed_counts
self.individual_counts = observed_counts.sum(axis=2)
self.observed_frequencies = normalize_haplotypes(observed_counts)
self.n_loci, self.n_pop, self.n_haplotypes = self.observed_counts.shape
# from bamova
self.DWEIGHT = 1.0
self.DADD = 0.00001
self.SMALL_NUM = 0.0000000000001
print "initializing frequencies"
self.freq = np.zeros((self.n_loci, self.n_haplotypes))
for l in xrange(self.n_loci):
self.freq[l, :] = self.sample_locus_freq(self.observed_frequencies[l, 0, :])
def sample_locus_freq(self, freq):
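# Proposal distribution: draw new haplotype frequencies from a Dirichlet
# centred on the supplied frequencies; DADD and SMALL_NUM keep every
# concentration parameter strictly positive, as the Dirichlet requires.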
alphas = self.DWEIGHT * freq + self.DADD + self.SMALL_NUM
return npr.dirichlet(alphas)
def locus_prob(self, locus_obs_counts, locus_freq):
log_prob_sum = 0.0
for p in xrange(self.n_pop):
log_prob_sum += log_multinomial(locus_obs_counts[p], locus_freq)
return log_prob_sum
def step(self):
total_log_prob = 0.0
for l in xrange(self.n_loci):
locus_indiv_counts = self.individual_counts[l, :]
locus_obs_counts = self.observed_counts[l, :, :]
log_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])
proposed_locus_freq = self.sample_locus_freq(self.freq[l, :])
proposed_log_prob = self.locus_prob(locus_obs_counts, proposed_locus_freq)
log_prob_ratio = proposed_log_prob - log_prob
log_r = np.log(npr.random())
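# Metropolis acceptance rule: always accept a proposal that improves the
# log probability, otherwise accept with probability
# exp(proposed_log_prob - log_prob).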
if proposed_log_prob >= log_prob or log_r <= log_prob_ratio:
self.freq[l, :] = proposed_locus_freq
log_prob = proposed_log_prob
total_log_prob += log_prob
locus_prob = []
for l in xrange(self.n_loci):
log_prob = self.locus_prob(self.observed_counts[l, :, :], self.freq[l, :])  # use this locus's own observed counts
locus_prob.append(log_prob)
return self.freq, total_log_prob, locus_prob
def plot_log_prob(flname, log_probs):
plt.clf()
plt.hold(True)
plt.hist(log_probs, bins=30)
plt.xlabel("Log Probability", fontsize=16)
plt.xlim([min(log_probs), 0.0])
plt.ylabel("Occurrences (Loci)", fontsize=16)
plt.savefig(flname, dpi=200)
def simulate(occur_fl, n_steps, plot_flname, prob_flname):
print "reading occurrences"
observed_counts = read_counts(occur_fl)
individual_counts = observed_counts.sum(axis=2)
observed_frequencies = normalize_haplotypes(observed_counts)
sampler = KDE_MCMC_Sampler(observed_counts)
fl = open(prob_flname, "w")
locus_log_prob = []
for i in xrange(n_steps):
freq, log_prob, locus_log_prob = sampler.step()
print "step", i, "log prob", log_prob
if i % 100 == 0:
for j, prob in enumerate(locus_log_prob):
fl.write("%s %s %s\n" % (i, j, prob))
fl.close()
plot_log_prob(plot_flname, locus_log_prob)
if __name__ == "__main__":
occur_fl = sys.argv[1]
n_steps = int(sys.argv[2])
plot_flname = sys.argv[3]
prob_flname = sys.argv[4]
simulate(occur_fl, n_steps, plot_flname, prob_flname)
| apache-2.0 |
q1ang/seaborn | seaborn/tests/test_distributions.py | 14 | 8102 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from . import PlotTestCase
from .. import distributions as dist
try:
import statsmodels.nonparametric.api
assert statsmodels.nonparametric.api
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
class TestKDE(PlotTestCase):
rs = np.random.RandomState(0)
x = rs.randn(50)
y = rs.randn(50)
kernel = "gau"
bw = "scott"
gridsize = 128
clip = (-np.inf, np.inf)
cut = 3
def test_scipy_univariate_kde(self):
"""Test the univariate KDE estimation with scipy."""
grid, y = dist._scipy_univariate_kde(self.x, self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._scipy_univariate_kde(self.x, bw, self.gridsize,
self.cut, self.clip)
@skipif(_no_statsmodels)
def test_statsmodels_univariate_kde(self):
"""Test the univariate KDE estimation with statsmodels."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._statsmodels_univariate_kde(self.x, self.kernel, bw,
self.gridsize, self.cut,
self.clip)
def test_scipy_bivariate_kde(self):
"""Test the bivariate KDE estimation with scipy."""
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, self.bw,
self.gridsize, self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
# Test a specific bandwidth
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, 1,
self.gridsize, self.cut, clip)
# Test that we get an error with an invalid bandwidth
with nt.assert_raises(ValueError):
dist._scipy_bivariate_kde(self.x, self.y, (1, 2),
self.gridsize, self.cut, clip)
@skipif(_no_statsmodels)
def test_statsmodels_bivariate_kde(self):
"""Test the bivariate KDE estimation with statsmodels."""
clip = [self.clip, self.clip]
x, y, z = dist._statsmodels_bivariate_kde(self.x, self.y, self.bw,
self.gridsize,
self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
@skipif(_no_statsmodels)
def test_statsmodels_kde_cumulative(self):
"""Test computation of cumulative KDE."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip,
cumulative=True)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
# make sure y is monotonically increasing
npt.assert_((np.diff(y) > 0).all())
def test_kde_cummulative_2d(self):
"""Check error if args indicate bivariate KDE and cumulative."""
with npt.assert_raises(TypeError):
dist.kdeplot(self.x, data2=self.y, cumulative=True)
def test_bivariate_kde_series(self):
df = pd.DataFrame({'x': self.x, 'y': self.y})
ax_series = dist.kdeplot(df.x, df.y)
ax_values = dist.kdeplot(df.x.values, df.y.values)
nt.assert_equal(len(ax_series.collections),
len(ax_values.collections))
nt.assert_equal(ax_series.collections[0].get_paths(),
ax_values.collections[0].get_paths())
class TestJointPlot(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "jointplot")))
x = rs.randn(100)
y = rs.randn(100)
data = pd.DataFrame(dict(x=x, y=y))
def test_scatter(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.collections), 1)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_reg(self):
g = dist.jointplot("x", "y", self.data, kind="reg")
nt.assert_equal(len(g.ax_joint.collections), 2)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_resid(self):
g = dist.jointplot("x", "y", self.data, kind="resid")
nt.assert_equal(len(g.ax_joint.collections), 1)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 0)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_hex(self):
g = dist.jointplot("x", "y", self.data, kind="hex")
nt.assert_equal(len(g.ax_joint.collections), 1)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_kde(self):
g = dist.jointplot("x", "y", self.data, kind="kde")
nt.assert_true(len(g.ax_joint.collections) > 0)
nt.assert_equal(len(g.ax_marg_x.collections), 1)
nt.assert_equal(len(g.ax_marg_y.collections), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_color(self):
g = dist.jointplot("x", "y", self.data, color="purple")
purple = mpl.colors.colorConverter.to_rgb("purple")
scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]
nt.assert_equal(tuple(scatter_color), purple)
hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]
nt.assert_equal(hist_color, purple)
def test_annotation(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)
g = dist.jointplot("x", "y", self.data, stat_func=None)
nt.assert_is(g.ax_joint.legend_, None)
def test_hex_customise(self):
# test that default gridsize can be overridden
g = dist.jointplot("x", "y", self.data, kind="hex",
joint_kws=dict(gridsize=5))
nt.assert_equal(len(g.ax_joint.collections), 1)
a = g.ax_joint.collections[0].get_array()
nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5
def test_bad_kind(self):
with nt.assert_raises(ValueError):
dist.jointplot("x", "y", self.data, kind="not_a_kind")
| bsd-3-clause |
tapomayukh/projects_in_python | rapid_categorization/haptic_map/outlier/hmm_crossvalidation_force.py | 1 | 19066 | # Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_length')
from data_variable_length_force import Fmat_original
if __name__ == '__main__' or __name__ != '__main__':
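# NOTE: this condition is always true, so the training code below runs both
# when the file is executed directly and when it is imported as a module.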
print "Inside outlier HMM model training file"
Fmat = Fmat_original
# Getting mean / covariance
i = 0
number_states = 10
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 35):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 0:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rf_force = np.zeros((number_states,1))
sigma_rf = np.zeros((number_states,1))
while (j < number_states):
mu_rf_force[j] = np.mean(feature_1_final_data[j])
sigma_rf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 35
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 70):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 35:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rm_force = np.zeros((number_states,1))
sigma_rm = np.zeros((number_states,1))
while (j < number_states):
mu_rm_force[j] = np.mean(feature_1_final_data[j])
sigma_rm[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 70
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 105):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 70:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sf_force = np.zeros((number_states,1))
sigma_sf = np.zeros((number_states,1))
while (j < number_states):
mu_sf_force[j] = np.mean(feature_1_final_data[j])
sigma_sf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 105
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 140):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 105:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sm_force = np.zeros((number_states,1))
sigma_sm = np.zeros((number_states,1))
while (j < number_states):
mu_sm_force[j] = np.mean(feature_1_final_data[j])
sigma_sm[j] = scp.std(feature_1_final_data[j])
j = j+1
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
# For new objects, each is classified according to the model it matches most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
if number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*number_states
B_rm = [0.0]*number_states
B_sf = [0.0]*number_states
B_sm = [0.0]*number_states
for num_states in range(number_states):
B_rf[num_states] = [mu_rf_force[num_states][0],sigma_rf[num_states][0]]
B_rm[num_states] = [mu_rm_force[num_states][0],sigma_rm[num_states][0]]
B_sf[num_states] = [mu_sf_force[num_states][0],sigma_sf[num_states][0]]
B_sm[num_states] = [mu_sm_force[num_states][0],sigma_sm[num_states][0]]
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
if number_states == 3:
pi = [1./3.] * 3
elif number_states == 5:
pi = [0.2] * 5
elif number_states == 10:
pi = [0.1] * 10
elif number_states == 15:
pi = [1./15.] * 15
elif number_states == 20:
pi = [0.05] * 20
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
total_seq = Fmat
for i in range(140):
total_seq[i][:] = sum(total_seq[i][:],[])
while (trial_number < 6):
# For Training
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[1:5]
total_seq_rm = total_seq[36:40]
total_seq_sf = total_seq[71:75]
total_seq_sm = total_seq[106:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+1:j+5]
total_seq_rm = total_seq_rm+total_seq[j+36:j+40]
total_seq_sf = total_seq_sf+total_seq[j+71:j+75]
total_seq_sm = total_seq_sm+total_seq[j+106:j+110]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[0]]+total_seq[2:5]
total_seq_rm = [total_seq[35]]+total_seq[37:40]
total_seq_sf = [total_seq[70]]+total_seq[72:75]
total_seq_sm = [total_seq[105]]+total_seq[107:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+0]]+total_seq[j+2:j+5]
total_seq_rm = total_seq_rm+[total_seq[j+35]]+total_seq[j+37:j+40]
total_seq_sf = total_seq_sf+[total_seq[j+70]]+total_seq[j+72:j+75]
total_seq_sm = total_seq_sm+[total_seq[j+105]]+total_seq[j+107:j+110]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:2]+total_seq[3:5]
total_seq_rm = total_seq[35:37]+total_seq[38:40]
total_seq_sf = total_seq[70:72]+total_seq[73:75]
total_seq_sm = total_seq[105:107]+total_seq[108:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+2]+total_seq[j+3:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+37]+total_seq[j+38:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+72]+total_seq[j+73:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+107]+total_seq[j+108:j+110]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:3]+total_seq[4:5]
total_seq_rm = total_seq[35:38]+total_seq[39:40]
total_seq_sf = total_seq[70:73]+total_seq[74:75]
total_seq_sm = total_seq[105:108]+total_seq[109:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+3]+total_seq[j+4:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+38]+total_seq[j+39:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+73]+total_seq[j+74:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+108]+total_seq[j+109:j+110]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:4]
total_seq_rm = total_seq[35:39]
total_seq_sf = total_seq[70:74]
total_seq_sm = total_seq[105:109]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+4]
total_seq_rm = total_seq_rm+total_seq[j+35:j+39]
total_seq_sf = total_seq_sf+total_seq[j+70:j+74]
total_seq_sm = total_seq_sm+total_seq[j+105:j+109]
j = j+5
train_seq_rf = total_seq_rf
train_seq_rm = total_seq_rm
train_seq_sf = total_seq_sf
train_seq_sm = total_seq_sm
#print train_seq_rf[27]
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = [total_seq[0]]
total_seq_rm = [total_seq[35]]
total_seq_sf = [total_seq[70]]
total_seq_sm = [total_seq[105]]
#print np.shape(total_seq_rf)
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j]]
total_seq_rm = total_seq_rm+[total_seq[j+35]]
total_seq_sf = total_seq_sf+[total_seq[j+70]]
total_seq_sm = total_seq_sm+[total_seq[j+105]]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[1]]
total_seq_rm = [total_seq[36]]
total_seq_sf = [total_seq[71]]
total_seq_sm = [total_seq[106]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+1]]
total_seq_rm = total_seq_rm+[total_seq[j+36]]
total_seq_sf = total_seq_sf+[total_seq[j+71]]
total_seq_sm = total_seq_sm+[total_seq[j+106]]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = [total_seq[2]]
total_seq_rm = [total_seq[37]]
total_seq_sf = [total_seq[72]]
total_seq_sm = [total_seq[107]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+2]]
total_seq_rm = total_seq_rm+[total_seq[j+37]]
total_seq_sf = total_seq_sf+[total_seq[j+72]]
total_seq_sm = total_seq_sm+[total_seq[j+107]]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = [total_seq[3]]
total_seq_rm = [total_seq[38]]
total_seq_sf = [total_seq[73]]
total_seq_sm = [total_seq[108]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+3]]
total_seq_rm = total_seq_rm+[total_seq[j+38]]
total_seq_sf = total_seq_sf+[total_seq[j+73]]
total_seq_sm = total_seq_sm+[total_seq[j+108]]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = [total_seq[4]]
total_seq_rm = [total_seq[39]]
total_seq_sf = [total_seq[74]]
total_seq_sm = [total_seq[109]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+4]]
total_seq_rm = total_seq_rm+[total_seq[j+39]]
total_seq_sf = total_seq_sf+[total_seq[j+74]]
total_seq_sm = total_seq_sm+[total_seq[j+109]]
j = j+5
trial_number = trial_number + 1
print "Outlier HMM model trained"
| mit |
chrisburr/scikit-learn | sklearn/metrics/ranking.py | 17 | 26927 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# If the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/simple_plot.py | 1 | 1292 | """
===========
Simple Plot
===========
Create a simple plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
# Note that using plt.subplots below is equivalent to using
# fig = plt.figure and then ax = fig.add_subplot(111)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
pltshow(plt)
| mit |
dav-stott/phd-thesis | spectra_thesis_ais.py | 1 | 70177 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 25 08:48:28 2014
@author: david
"""
#*************** IMPORT DEPENDANCIES*******************************************
import numpy as np
#import spec_gdal4 as spg
from osgeo import gdal
import os
import csv
#import h5py
import datetime
import numpy.ma as ma
#from StringIO import StringIO
#import shapely
#import r2py
from osgeo import gdal_array
from osgeo import gdalconst
from osgeo.gdalconst import *
from osgeo import ogr
from osgeo import osr
from scipy.spatial import ConvexHull
from scipy.signal import find_peaks_cwt
from scipy.signal import savgol_filter
from scipy import interpolate
import matplotlib.pyplot as plt
#from shapely.geometry import LineString
################# Functions ###################################################
'''These are functions that are not part of any specific class- they
are used by the data import classes for operations such as smoothing'''
def smoothing(perc_out, block_start, block_end, kparam, weight, sparam):
#D
sm_spline_block = perc_out[block_start:block_end,:]
sm_x = sm_spline_block[:,0]
sm_y = sm_spline_block[:,1]
sm_len = sm_x.shape
sm_weights = np.zeros(sm_len)+weight
sm_spline = interpolate.UnivariateSpline(sm_x,
sm_y,
k=kparam,
w=sm_weights,
s=sparam)
spline = sm_spline(sm_x)
spline = np.column_stack((sm_x,spline))
return spline
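# Illustrative call (parameter values are hypothetical, not taken from the
# original code): smoothing(perc_out, 0, 200, kparam=3, weight=1.0, sparam=0.05)
# returns a 2-column array of (wavelength, smoothed value) for rows 0-199.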
def interpolate_gaps(array1, array2):
array_end = array1.shape[0]-1
array1_endx = array1[array_end, 0]
#get the start point of the second array
array2_start = array2[0,0]
#get the length of the area to be interpolated
x_len = array2_start-array1_endx+1
#generate x values to use for the array
xvals = np.linspace(array1_endx, array2_start, num=x_len)
#y val for the start of the interpolated area
yval_array1 = array1[array_end,1]
# y val for the end of interpolated area
yval_array2 = array2[0,1]
#stack the values into a new array
xin = np.append(array1_endx, array2_start)
yin = np.append(yval_array1, yval_array2)
#numpy.interp(x, xp, fp)
gap_filling = np.interp(xvals, xin, yin)
filled_x = np.column_stack((xvals, gap_filling))
print (filled_x.shape)
return filled_x
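# Worked example (illustrative): if array1 ends at (700.0, 0.5) and array2
# starts at (705.0, 0.6), then x_len is 6 and the gap is filled with the points
# (700, 0.5), (701, 0.52), (702, 0.54), (703, 0.56), (704, 0.58), (705, 0.6).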
class absorption_feature():
'''this class is used for the characterisation of spectral absorption features,
and their investigation using continuum removal'''
def __init__(self, spectra, feat_start, feat_end, feat_centre):
self.wl = spectra[:,0]
self.values = spectra[:,1]
print ('CALL TO ABSORPTION FEATURE')
# start of absorption feature
self.feat_start = feat_start
# end of absorption feature
self.feat_end = feat_end
# approximate 'centre' of feature
self.feat_centre = feat_centre
#get the range of the data
self.min_wl = self.wl[0]
self.max_wl = self.wl[-1]
print ('Absorption feature',self.feat_start,self.feat_end)
#define feature name
self.feat_name = str(self.feat_start)+'_'+str(self.feat_end)
'''# if the feature is within the range of the sensor, do stuff
if self.feat_start > self.min_wl and self.feat_end < self.max_wl:
print 'can do stuff with this data'
try:
self.abs_feature()
print ('Absorption feature analysis successful')
except:
print ('ERROR analysing absorption feature', self.feat_name)
pass
else:
print ('Cannot define feature: Out of range')'''
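# Illustrative usage (the feature bounds are hypothetical example values):
# ft = absorption_feature(spectrum, 550, 750, 670)
# ft_def, ft_hdr, cont_rem = ft.abs_feature()
# where spectrum is a 2-column array of (wavelength, reflectance) pairs.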
########## Methods ##################################################
def abs_feature(self):
print ('Call to abs_feature made')
# Method to calculate the end points of the absorption feature
# Does this using the Qhull algorithm from scipy.spatial
#use the initial definition of the absorption feature as a starting point
# get the indices for these
cont_rem_stacked = None
ft_def_stacked = None
start_point = np.argmin(np.abs(self.wl-self.feat_start))
end_point = np.argmin(np.abs(self.wl-self.feat_end))
centre = np.argmin(np.abs(self.wl-self.feat_centre))
#find the index minima of reflectance
minima = np.argmin(self.values[start_point:end_point])+start_point
# if the minima = the start point then the start point is the minima
if minima == start_point:
left = minima
#if not then the left side of the feature is the maxima on the left of the minima
elif minima <= centre:
left = start_point+np.argmax(self.values[start_point:centre])
else:
left = start_point+np.argmax(self.values[start_point:minima])
#right is the maxima on the right of the absorption feature
if minima == end_point:
right = minima
else:
right = minima+np.argmax(self.values[minima:end_point])
# use left and right to create a 2D array of points
hull_in = np.column_stack((self.wl[left:right],self.values[left:right]))
#determine the minima of the points
hull_min = minima-left
if hull_min <= 0:
hull_min=0
#find the wavelength at minima
hull_min_wl = hull_in[hull_min,0]
# define the wavelength ranges we'll use to select simplices
ft_left_wl = hull_min_wl-((hull_min_wl-hull_in[0,0])/2)
ft_right_wl = hull_min_wl+((hull_in[-1,0]-hull_min_wl)/2)
#use scipy.spatial convex hull to determine the convex hull of the points
hull = ConvexHull(hull_in)
# get the simplex tuples from the convex hull
simplexes = hull.simplices
# create an empty list to store simplices potentially related to our feature
feat_pos = []
#iterate through the simplices
for simplex in simplexes:
#extract vertices from simplices
vertex1 = simplex[0]
vertex2 = simplex[1]
#print 'VERT!',hull_in[vertex1,0],hull_in[vertex2,0]
''' We're only interested in the upper hull. Qhull moves counter-
clockwise. Therefore we're only interested in those points where
vertex 1 is greater than vertex 2'''
'''The above may be total bollocks'''
if not vertex1 < vertex2:
'''We then use the wavelength ranges to determine which simplices
relate to our absorption feature'''
if hull_in[vertex2,0] <= ft_left_wl and \
hull_in[vertex2,0] >= self.wl[left] and \
hull_in[vertex1,0] >= ft_right_wl and \
hull_in[vertex1,0] <= self.wl[right]:
# append the vertices to the list
print (hull_in[vertex2,0])
print (hull_in[vertex1,0])
feat_pos.append((vertex2,vertex1))
print ('feat_pos length:',len(feat_pos), type(feat_pos))
#print feat_pos[0],feat_pos[1]
else:
continue
'''We only want one feature here. If there's more than one or less
than one we're not interested as we're probably not dealing with
vegetation'''
# If there's less than one feature...
if len(feat_pos) < 1:
print ('Absorption feature cannot be defined:less than one feature')
ft_def_stacked = None
ft_def_hdr = None
cont_rem_stacked = None
elif len(feat_pos) == 1:
feat_pos=feat_pos[0]
print ('£££££',feat_pos, type(feat_pos))
else:
#if there's more than one, find the widest one. This is not optimal.
if len(feat_pos) >1:
feat_width = []
for pair in feat_pos:
feat_width.append(pair[1]-pair[0])
print ('feat width:', feat_width)
#feat_width = np.asarray(feat_width)
print (feat_width)
f_max = feat_width.index(max(feat_width))
print (f_max)
feat_pos = feat_pos[f_max]
print (type(feat_pos))
if feat_pos is not None:
feat_pos = feat_pos[0], feat_pos[1]
print ('DOES MY FEAT_POS CONVERSION WORK?', feat_pos)
print ('Analysing absorption feature')
#slice
feature = hull_in[feat_pos[0]:feat_pos[1],:]
print ('Feature shape',feature.shape,'start:',feature[0,0],'end:',feature[-1,0])
#get the minima in the slice
minima_pos = np.argmin(feature[:,1])
#continuum removal
contrem = self.continuum_removal(feature,minima_pos)
# set up single value outputs
# start of feature
refined_start = feature[0,0]
# end of feature
refined_end = feature[-1,0]
# wavelength at minima
minima_WL = feature[minima_pos,0]
# reflectance at minima
minima_R = feature[minima_pos,1]
# area of absorption feature
feat_area = contrem[4]
# two band normalised index of minima and start of feature
left_tbvi = (refined_start-minima_R)/(refined_start+minima_R)
# two band normalised index of minima and right of feature
right_tbvi = (refined_end-minima_R)/(refined_end+minima_R)
# gradient of the continuum line
cont_gradient = np.mean(np.gradient(contrem[0]))
# area of continuum removed absorption feature
cont_rem_area = contrem[3]
# maxima of continuum removed absorption feature
cont_rem_maxima = np.max(contrem[1])
# wavelength of maxima of continuum removed absorption feature
cont_rem_maxima_wl = feature[np.argmax(contrem[1]),0]
#area of left part of continuum removed feature
cont_area_l = contrem[5]
            if cont_area_l is None:
                cont_area_l=0
            #area of right part of continuum removed feature
cont_area_r = contrem[6]
#stack these into a lovely array
ft_def_stacked = np.column_stack((refined_start,
refined_end,
minima_WL,
minima_R,
feat_area,
left_tbvi,
right_tbvi,
cont_gradient,
cont_rem_area,
cont_rem_maxima,
cont_rem_maxima_wl,
cont_area_l,
cont_area_r))
ft_def_hdr = str('"Refined start",'+
'"Refined end",'+
                             '"Minima Wavelength",'+
'"Minima Reflectance",'+
'"Feature Area",'+
'"Left TBVI",'+
'"Right TBVI",'+
'"Continuum Gradient",'+
'"Continuum Removed Area",'+
'"Continuum Removed Maxima",'+
'"Continuum Removed Maxima WL",'+
'"Continuum Removed Area Left",'+
'"Continuum Removed Area Right",')
#print ft_def_stacked.shape #save the stacked outputs as hdf
# stack the 2d continuum removed outputs
cont_rem_stacked = np.column_stack((feature[:,0],
feature[:,1],
contrem[0],
contrem[1],
contrem[2]))
print ('CREM', cont_rem_stacked.shape)
return ft_def_stacked, ft_def_hdr, cont_rem_stacked
def continuum_removal(self,feature,minima):
        #method to perform continuum removal
        #pull out the endmembers
end_memb = np.vstack((feature[0,:],feature[-1,:]))
#interpolate between the endmembers using x intervals
continuum_line = np.interp(feature[:,0], end_memb[:,0], end_memb[:,1])
#continuum removal
continuum_removed = continuum_line/feature[:,1]
#stack into coord pairs so we can measure the area of the feature
ft_coords = np.vstack((feature,
np.column_stack((feature[:,0],continuum_line))))
#get the area
area = self.area(ft_coords)
#get the area of the continuum removed feature
cont_rem_2d = np.column_stack((feature[:,0],continuum_removed))
cont_r_area = self.area(cont_rem_2d)
#band-normalised by area continuum removal
cont_BNA = (1-(feature[:,1]/continuum_line))/area
#continuum removed area on left of minima
cont_area_left = self.area(cont_rem_2d[0:minima,:])
#continuum removed area on right of minima
cont_area_right = self.area(cont_rem_2d[minima:,:])
return (continuum_line,
continuum_removed,
cont_BNA,
cont_r_area,
area,
cont_area_left,
cont_area_right)
#define area of 2d polygon- using shoelace formula
def area(self, coords2d):
#setup counter
total = 0.0
        #get the number of coordinate pairs
N = coords2d.shape[0]
#iterate through these
for i in range(N):
#define the first coordinate pair
vertex1 = coords2d[i]
#do the second
vertex2 = coords2d[(i+1) % N]
            #add this edge's cross-product term to the running total
total += vertex1[0]*vertex2[1] - vertex1[1]*vertex2[0]
#return area
return abs(total/2)
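# The following helper is an illustrative sketch only (not part of the original
# pipeline): it shows the shoelace formula used by absorption_feature.area()
# applied to a unit square, where the expected area is 1.0. The function name
# and data are hypothetical and exist purely for this example.
def _shoelace_area_demo():
    import numpy as np
    # four corners of a unit square as (x, y) coordinate pairs
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    total = 0.0
    n = square.shape[0]
    for i in range(n):
        x1, y1 = square[i]
        x2, y2 = square[(i + 1) % n]
        # accumulate the signed cross-product term for each edge
        total += x1 * y2 - y1 * x2
    return abs(total / 2.0)  # 1.0 for the unit square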
class Indices():
#class that does vegetation indices
def __init__(self,spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
'''So, the init method here checks the range of the sensor and runs
the appropriate indices within that range, and saves them as hdf5.
The indices are all defined as methods of this class'''
def visnir(self):
# Sensor range VIS-NIR
if self.range[0] >= 350 and \
self.range[0] <= 500 and \
self.range[1] >= 900:
vis_nir = np.column_stack((self.sr700_800(),
self.ndvi694_760(),
self.ndvi695_805(),
self.ndvi700_800(),
self.ndvi705_750(),
self.rdvi(),
self.savi(),
self.msavi2(),
self.msr(),
self.msrvi(),
self.mdvi(),
self.tvi(),
self.mtvi(),
self.mtvi2(),
self.vog1vi(),
self.vog2(),
self.prsi(),
self.privi(),
self.sipi(),
self.mcari(),
self.mcari1(),
self.mcari2(),
self.npci(),
self.npqi(),
self.cri1(),
self.cri2(),
self.ari1(),
self.ari2(),
self.wbi()))
vis_nir_hdr=str('"sr700_800",'+
'"ndvi694_760",'+
'"ndvi695_805",'+
'"ndvi700_800",'+
'"ndvi705_750",'+
'"rdvi",'+
'"savi",'+
'"msavi2",'+
'"msr",'+
'"msrvi",'+
'"mdvi",'+
'"tvi",'+
'"mtvi",'+
'"mtvi2",'+
'"vog1vi",'+
'"vog2",'+
'"prsi"'+
'"privi",'+
'"sipi",'+
'"mcari",'+
'"mcari1",'+
'"mcari2",'+
'"npci",'+
'"npqi",'+
'"cri1",'+
'"cri2",'+
'"ari1",'+
'"ari2",'+
'"wbi"')
else:
vis_nir = None
vis_nir_hdr = None
return vis_nir,vis_nir_hdr
#Range NIR-SWIR
def nir_swir(self):
if self.range[0] <= 900 and self.range[1] >=2000:
nir_swir = np.column_stack((self.ndwi(),
self.msi(),
self.ndii()))
nir_swir_hdr = str('"ndwi",'+
'"msi",'+
'"ndii"')
else:
#continue
print ('not nir-swir')
nir_swir=None
nir_swir_hdr=None
return nir_swir, nir_swir_hdr
#range SWIR
def swir(self):
if self.range[1] >=2000:
swir = np.column_stack((self.ndni(),
self.ndli()))
swir_hdr=str('"ndni",'+
'"ndli"')
else:
            print ('not swir')
swir = None
swir_hdr = None
#continue
return swir,swir_hdr
#||||||||||||||||||||| Methods |||||||||||||||||||||||||||||||||||||||||||||||
# function to run every permutation of the NDVI type index across the Red / IR
# ...... VIS / NIR methods ....
def multi_tbvi (self, red_start=650, red_end=750, ir_start=700, ir_end=850):
        # get the indices of the regions we're going to use.
        # we've added default values here, but they can happily be overridden
#start of red
red_l =np.argmin(np.abs(self.wl-red_start))
#end of red
red_r = np.argmin(np.abs(self.wl-red_end))
#start of ir
ir_l = np.argmin(np.abs(self.wl-ir_start))
#end of ir
ir_r = np.argmin(np.abs(self.wl-ir_end))
#slice
left = self.values[red_l:red_r]
right = self.values[ir_l:ir_r]
#set up output
values = np.empty(3)
#set up counter
l = 0
        #loop through the values in the red
for lvalue in left:
l_wl = self.wl[l+red_l]
r = 0
l = l+1
#then calculate the index with each wl in the NIR
for rvalue in right:
value = (rvalue-lvalue)/(rvalue+lvalue)
r_wl = self.wl[r+ir_l]
out = np.column_stack((l_wl,r_wl,value))
values = np.vstack((values, out))
out = None
r = r+1
return values[1:,:]
def sr700_800 (self, x=700, y=800):
index = self.values[np.argmin(np.abs(self.wl-x))]/self.values[np.argmin(np.abs(self.wl-y))]
return index
def ndvi705_750 (self, x=705, y=750):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi700_800 (self, x=700, y=800):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi694_760 (self, x=694, y=760):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi695_805 (self, x=695, y=805):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npci (self, x=430, y=680):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npqi (self, x=415, y=435):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
#mSRvi
#= (750-445)/(705+445)
def msrvi (self):
x = 750
y = 445
z = 705
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
msrvi_val = (x_val-y_val)/(z_val+y_val)
return msrvi_val
#Vogelmann Red Edge 1
#740/720
def vog1vi (self):
x = 740
y = 720
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
vog1vi_val = (x_val/y_val)
return vog1vi_val
#Vogelmann Red Edge 2
#= (734-747)/(715+726)
def vog2 (self):
v = 734
x = 747
y = 715
z = 726
v_val = self.values[np.argmin(np.abs(self.wl-v))]
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
vog2_val = (v_val-x_val)/(y_val+z_val)
return vog2_val
#PRI
# (531-570)/(531+570)
def privi (self):
x = 531
y = 570
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
privi_val = (x_val-y_val)/(x_val+y_val)
return privi_val
#SIPI
#(800-445)/(800-680)
def sipi (self):
x = 800
y = 445
z = 680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        sipi_val = (x_val-y_val)/(x_val-z_val)
return sipi_val
#Water band index
# WBI = 900/700
def wbi (self):
x = 900
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
wbi_val = (x_val/y_val)
return wbi_val
#mNDVI
#= (750-705)/((750+705)-(445))
def mdvi (self):
x = 750
y = 705
z = 445
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mdvi_val = (x_val-y_val)/((x_val+y_val)-z_val)
return mdvi_val
    #Carotenoid Reflectance Index
#CRI1 = (1/510)-(1/550)
def cri1 (self):
x = 510
y = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri1_val = (1/x_val)-(1/y_val)
return cri1_val
#CRI2 = (1/510)-(1/700)
def cri2 (self):
x = 510
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri2_val = (1/x_val)-(1/y_val)
return cri2_val
#Anthocyanin
#ARI1 = (1/550)-(1/700)
def ari1 (self):
x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari1_val = (1/x_val)-(1/y_val)
return ari1_val
    #ARI2 = 800*((1/550)-(1/700))
def ari2 (self):
        x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari2_val = 800*((1/x_val)-(1/y_val))
return ari2_val
#MSR
#=((800/670)-1)/SQRT(800+670)
def msr (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msr_val = ((x_val/y_val)-1)/(np.sqrt(x_val+y_val))
return msr_val
#SAVI
#= (1+l)(800-670)/(800+670+l)
def savi (self, l=0.5):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
savi_val = ((1+l)*(x_val-y_val))/(x_val+y_val+l)
return savi_val
    #MSAVI2
    #= ((2*800+1) - sqrt((2*800+1)^2 - 8*(800-670)))/2
def msavi2 (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msavi2_top1 = (2*x_val+1)
msavi2_top2 = (np.sqrt(np.square(2*x_val+1)-(8*(x_val-y_val))))
msavi2_top = msavi2_top1-msavi2_top2
msavi2_val = msavi2_top/2
return msavi2_val
    #Modified chlorophyll absorption index
#MCARI = ((700-670)-0.2*(700-550))*(700/670)
def mcari (self):
x = 700
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari_val = ((x_val-y_val)-(0.2*(x_val-z_val)))*(x_val/y_val)
return mcari_val
#Triangular vegetation index
#TVI 0.5*(120*(750-550))-(200*(670-550))
def tvi (self):
x = 750
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        tvi_val = 0.5*((120*(x_val-y_val))-(200*(z_val-y_val)))
return tvi_val
    #MCARI1 = 1.2*(2.5*(800-670)-1.3*(800-550))
def mcari1 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari1_val = 1.2*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
return mcari1_val
#MTVI1
#=1.2*((1.2*(800-550))-(2.5(670-550)))
def mtvi (self):
x = 800
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mtvi_val = 1.2*((1.2*(x_val-y_val))-(2.5*(z_val-y_val)))
return mtvi_val
def mcari2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mcari2_top = (1.5*(2.5*(x_val-y_val)))-(1.3*(x_val-z_val))
mcari2_btm = np.sqrt((np.square(2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mcari2_val = mcari2_top/mcari2_btm
return mcari2_val
    #MTVI2 = 1.5*(1.2*(800-550)-2.5*(670-550))/sqrt((2*800+1)^2-((6*800)-(5*sqrt(670)))-0.5)
def mtvi2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mtvi2_top = (1.5*(2.5*(x_val-z_val)))-(1.3*(x_val-z_val))
mtvi2_btm = np.sqrt((np.square(2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mtvi2_val = mtvi2_top/mtvi2_btm
return mtvi2_val
#Renormalised DVI
#RDVI = (800-670)/sqrt(800+670)
def rdvi (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
rdvi_val = (x_val-y_val)/np.sqrt(x_val+y_val)
return rdvi_val
    #Plant senescence reflectance index
#PRSI = (680-500)/750
def prsi (self):
x = 680
y = 500
z = 750
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
prsi_val = (x_val-y_val)/z_val
return prsi_val
#||||||||||||||||||||||| SWIR methods ||||||||||||||||||||||||||||||||||||
    #Cellulose Absorption Index
    #CAI = 0.5*(2000+2200)-2100
def cai (self):
x = 2000
y = 2200
z = 2100
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        cai_val = 0.5*(x_val+y_val)-z_val
return cai_val
#Normalized Lignin Difference
#NDLI = (log(1/1754)-log(1/1680))/(log(1/1754)+log(1/1680))
def ndli (self):
x = 1754
        y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndli_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndli_val
#Canopy N
#NDNI =(log(1/1510)-log(1/1680))/(log(1/1510)+log(1/1680))
def ndni (self):
x = 1510
y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndni_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndni_val
#|||||||||||||||||||||| Full spectrum (VIS-SWIR)||||||||||||||||||||||||||||
#Normalised Difference IR index
    #NDII = (819-1649)/(819+1649)
def ndii (self):
x = 819
y = 1649
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndii_val = (x_val-y_val)/(x_val+y_val)
return ndii_val
#Moisture Stress Index
    #MSI = 1599/819
def msi (self):
x = 1599
        y = 819
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msi_val = (x_val/y_val)
return msi_val
#NDWI
#(857-1241)/(857+1241)
def ndwi (self):
x = 857
y = 1241
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndwi_val = (x_val-y_val)/(x_val+y_val)
return ndwi_val
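# Illustrative sketch only (not called by the processing chain): it mirrors the
# nearest-wavelength lookup and the (y - x)/(y + x) normalised-difference
# pattern used by the Indices methods above, applied to a made-up spectrum.
# The wavelength grid and reflectance values are invented for this example.
def _ndvi_style_index_demo():
    import numpy as np
    wl = np.arange(350, 2501, 1.0)            # wavelengths in nm (synthetic)
    values = np.linspace(5.0, 60.0, wl.size)  # made-up reflectance values
    x, y = 670, 800                           # red and NIR bands
    x_val = values[np.argmin(np.abs(wl - x))]
    y_val = values[np.argmin(np.abs(wl - y))]
    return (y_val - x_val) / (y_val + x_val)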
class red_edge():
'''Class to derive red edge position using a number of different methods'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
        '''Again, the method that initialises this class uses the range of the
        sensor to check whether it falls within the red-edge region. If so,
        it will derive the red edge using the different methods and save these
        as separate hdf5 datasets in the appropriate group'''
if self.range[0] <= 670 and self.range[1] >=750:
self.redge_vals = np.column_stack((self.redge_linear(),
self.redge_lagrange(),
self.redge_linear_extrapolation()))
print (self.redge_vals)
print (self.redge_linear,self.redge_lagrange,self.redge_linear_extrapolation)
self.redge_hdr = str('"linear",'+
'"lagrange",'+
'"extrapolated"')
else:
print ('red_edge out of range')
self.redge_vals = None
self.redge_hdr = None
##################### METHODS #########################################
#linear- defined by clevers et al 1994:
def redge_linear(self):
r670 = self.values[np.argmin(np.abs(self.wl-670))]
r780 = self.values[np.argmin(np.abs(self.wl-780))]
r700 = self.values[np.argmin(np.abs(self.wl-700))]
r740 = self.values[np.argmin(np.abs(self.wl-740))]
r_edge = (r670+r780)/2
lin_rep =700+40*((r_edge-r700)/(r740-r700))
print ('REDGE_LINEAR',lin_rep)
return lin_rep
#Lagrangian method, after Dawson & Curran 1998
def redge_lagrange(self):
        #select the red edge region of the first derivative and associate this
        #with wavelength
x = 680
y = 730
first_diff = np.diff(self.values, 1)
spec_in = np.column_stack((self.wl[1:], first_diff))
l680 = np.argmin(np.abs(spec_in[:,0]-x))
r680 = spec_in[l680,0]
l730 = np.argmin(np.abs(spec_in[:,0]-y))
r730 = spec_in[l730,0]
redge_region_sel = np.where(np.logical_and(spec_in[:,0]>r680-1,
spec_in[:,0]<r730+1))
redge_region = spec_in[redge_region_sel]
#find the maximum first derivative, return index
dif_max = np.argmax(redge_region[:,1], axis=0)
#find band with the max derivative -1, return index
dif_max_less = (np.argmax(redge_region[:,1], axis=0))-1
#find band with the max derivative +1, return index
dif_max_more = (np.argmax(redge_region[:,1], axis=0))+1
if dif_max_more >= redge_region.shape[0]:
dif_max_more = redge_region.shape[0]-1
        #use these indices to slice the array
rmax = redge_region[dif_max]
rmax_less =redge_region[dif_max_less]
rmax_more =redge_region[dif_max_more]
#lagrangian interpolation with three points
#this has been expanded to make the syntax easier
a = rmax_less[1]/(rmax_less[0]-rmax[0])*(rmax_less[0]-rmax_more[0])
b = rmax[1]/(rmax[0]-rmax_less[0])*(rmax[0]-rmax_more[0])
c = rmax_more[1]/(rmax_more[0]-rmax_less[0])*(rmax_more[0]-rmax[0])
d = a*(rmax[0]+rmax_more[0])
e = b*(rmax_less[0]+rmax_more[0])
f = c*(rmax_less[0]+rmax[0])
lg_rep = (d+e+f)/(2*(a+b+c))
print ('Lagrangian', lg_rep)
return lg_rep
#Linear extrapolation- after Cho & Skidmore 2006, Cho et al 2007
def redge_linear_extrapolation(self):
diff = np.diff(self.values)
d680 = diff[np.argmin(np.abs(self.wl-680+1))]
d694 = diff[np.argmin(np.abs(self.wl-694+1))]
d724 = diff[np.argmin(np.abs(self.wl-724+1))]
d760 = diff[np.argmin(np.abs(self.wl-760+1))]
red_slope = ((d694-d680)/(694-680))
ir_slope = ((d760-d724)/(760-724))
red_inter = d680-(red_slope*680)
ir_inter = d724-(ir_slope*724)
wl = (ir_inter-red_inter)/(ir_slope-red_slope)
print ('^!!!!!!!!! Linear:',wl)
return np.abs(wl)
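# Illustrative sketch of the linear red-edge interpolation used by
# red_edge.redge_linear() above: the red-edge reflectance is taken as the mean
# of R670 and R780, and the position is interpolated between 700 and 740 nm.
# The default reflectance values below are invented purely for the example.
def _redge_linear_demo(r670=5.0, r780=45.0, r700=10.0, r740=35.0):
    r_edge = (r670 + r780) / 2.0
    return 700 + 40 * ((r_edge - r700) / (r740 - r700))  # 724.0 for these inputs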
class fluorescence():
    '''this class is intended to look for evidence of photosynthetic fluorescence
    currently this is limited to simple reflectance indices. This should be
    expanded to take in other more complex methods to investigate fluorescence'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
print ('call to fluor')
'''The init method checks the range to establish if it overlaps with
region of chlorophyll flourescence. If so it will will perform the
analysis methods and output to hdf5'''
def wl_selector(self, x):
        '''this method returns the reflectance value at the wavelength
        closest to that specified'''
value = self.values[np.argmin(np.abs(self.wl-x))]
return value
def d_wl_selector(self, x):
        '''this method returns the first-derivative value at the wavelength
        closest to that specified'''
diff = np.diff(self.values)
value = diff[np.argmin(np.abs(self.wl-x))+1]
return value
def wl_max_d(self):
'''method to extract wavelength of the maxima of the first derivative
and return this'''
start = np.argmin(np.abs(self.wl-650))
end = np.argmin(np.abs(self.wl-760))
diff = np.diff(self.values[start:end])
maxdiff = np.argmax(diff)
maxdiffwl = self.wl[maxdiff+start+1]
return maxdiffwl, diff[maxdiff]
def simple_ratios(self):
        ''' This method runs fluorescence index ratios and returns them as a
stacked numpy array'''
#r680/r630
r680r630 = self.wl_selector(680)/self.wl_selector(630)
print (r680r630)
#r685/r630
r685r630 = self.wl_selector(685)/self.wl_selector(630)
print (r685r630)
#r685/r655
r685r655 = self.wl_selector(685)/self.wl_selector(655)
print (r685r655)
#r687/r630
r687r630 = self.wl_selector(687)/self.wl_selector(630)
print (r687r630)
#r690/r630
r690r630 = self.wl_selector(690)/self.wl_selector(630)
print (r690r630)
#r750/r800
r750r800 = self.wl_selector(750)/self.wl_selector(800)
print (r750r800)
#sq(r685)/(r675-r690)
sqr685 = np.square(self.wl_selector(685))/(self.wl_selector(675)-self.wl_selector(690))
print (sqr685)
#(r675-r690)/sq(r683) Zarco-Tejada 2000
r675r690divsq683 = (self.wl_selector(675)-self.wl_selector(690))/np.square(self.wl_selector(683))
print (r675r690divsq683)
#d705/d722
d705d722 = self.d_wl_selector(705)/self.d_wl_selector(722)
print (d705d722)
#d730/d706
d730d706 = self.d_wl_selector(730)/self.d_wl_selector(706)
print (d730d706)
#(d688-d710)/sq(d697)
d686d710sq697 = (self.d_wl_selector(688)-self.d_wl_selector(710))\
/np.square(self.d_wl_selector(697))
print (d686d710sq697)
#wl at max d / d720
maxdd720 = self.wl_max_d()[1]/self.d_wl_selector(720)
print (maxdd720)
#wl at max d / d703
maxdd703 = self.wl_max_d()[1]/self.d_wl_selector(703)
print (maxdd703)
#wl at max d / d(max d+12)
print (self.wl_max_d()[0])
maxd12 = self.wl_max_d()[1]/self.d_wl_selector(self.wl_max_d()[0]+12)
print (maxd12)
combined = np.vstack((r680r630,
r685r630,
r685r655,
r687r630,
r690r630,
r750r800,
sqr685,
r675r690divsq683,
d705d722,
d730d706,
d686d710sq697,
maxdd720,
maxdd703,
maxd12))
fluo_hdr = str('"r680r630",'+
'"r685r630",'+
'"r685r655",'+
'"r687r630",'+
'"r690r630",'+
'"r750r800",'+
'"sqr685",'+
'"r675r690divsq683",'+
'"d705d722",'+
'"d730d706",'+
'"d686d710sq697",'+
'"maxdd720",'+
'"maxdd703",'+
'"maxd12"')
return combined, fluo_hdr
def dual_peak(self):
        '''This function looks for a dual peak in the red-edge region. If it's
        there it measures the depth of the feature between the two peaks.
        UNTESTED'''
        start = np.argmin(np.abs(self.wl-640))
        end = np.argmin(np.abs(self.wl-740))
        d1_region = np.diff(self.values[start:end])
        #d2_region = np.diff(self.values[start:end], n=2)
        peak_finder = find_peaks_cwt(d1_region, np.arange(3,10))
        #np.diff drops the first band, so the derivative lines up with wl[start+1:end]
        peak_wl = self.wl[start+1:end][peak_finder]
        fluor_peaks = []
        for i, peak in enumerate(peak_finder):
            if peak_wl[i] == self.wl[np.argmin(np.abs(self.wl-668))]:
                print ('found fluorescence peak at 668nm')
                fluor_peaks.append(peak)
            elif peak_wl[i] == self.wl[np.argmin(np.abs(self.wl-735))]:
                print ('found fluorescence peak at 735nm')
                fluor_peaks.append(peak)
            else:
                print ('unknown peak')
'''if len(fluor_peaks) == 2:
something = 'something'''
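# Illustrative sketch (not used by the classes above): it shows the pattern
# behind fluorescence.d_wl_selector(), i.e. taking a first difference of the
# reflectance values and reading it off at the band nearest a target
# wavelength. The spectrum here is synthetic and exists only for this example.
def _first_derivative_at_wavelength_demo(target_nm=705.0):
    import numpy as np
    wl = np.arange(650.0, 800.0, 1.0)
    values = 1.0 / (1.0 + np.exp(-(wl - 720.0) / 10.0))  # sigmoid-like red edge
    diff = np.diff(values)
    return diff[np.argmin(np.abs(wl - target_nm)) + 1]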
class load_asd():
def __init__(self, indir, output_dir):
data_list = os.listdir(indir)
print (data_list)
#output_dir = os.path.join(indir,'output')
if not os.path.exists(output_dir):
            os.mkdir(output_dir)
for directory in data_list:
parent = os.path.join(indir, directory)
spectra_dir = os.path.join(parent, 'raw_spectra')
reading_info_dir = os.path.join(parent, 'reading_info')
sensor_name = 'ASD FieldSpec Pro'
sensor_type = 'SPR'
sensor_units = 'nm'
sensor_range = [350,2500]
os.chdir(reading_info_dir)
            reading_info_file = open('reading_atributes.txt','r')
reading_info = csv.DictReader(reading_info_file)
reading_info_array = np.empty(12)
readings_list = [row for row in reading_info]
for reading in readings_list[:]:
reading_filename = str(reading['reading_id']+'.txt')
reading_info_line = np.column_stack((reading['reading_id'],
reading['dartField'],
reading['transect'],
reading['transectPosition'],
reading['reading_type'],
reading['reading_coord_osgb_x'],
reading['reading_coord_osgb_y'],
reading['dateOfAcquisition'],
reading['timeOfAcquisition'],
reading['instrument_number'],
reading['dark_current'],
reading['white_ref']))
#print reading_info_line
if reading['reading_type']== 'REF':
reading_info_array = np.vstack((reading_info_array,reading_info_line))
#print reading_info_array
print ('*********** Loading File', reading_filename, '***********')
os.chdir(spectra_dir)
spec = np.genfromtxt(reading_filename,
delimiter=', ',
                                     skip_header=30)
spec = np.column_stack((spec[:,0],spec[:,1]*100))
nir_start = 0
nir_end = 990
nir_weight = 3.5
nir_k = 4.9
nir_s =45
swir1_start = 1080
swir1_end = 1438
swir1_weight = 8.5
swir1_k = 3.5
swir1_s = 35
swir2_start = 1622
swir2_end = 2149
swir2_weight = 1.2
swir2_s = 92
swir2_k = 2.8
#smoothing(perc_out, block_start, block_end, kparam, weight, sparam)
nir_smoothed = smoothing(spec, nir_start, nir_end, nir_k, nir_weight, nir_s)
swir1_smoothed = smoothing(spec, swir1_start, swir1_end, swir1_k, swir1_weight, swir1_s)
swir2_smoothed = smoothing(spec, swir2_start, swir2_end, swir2_k, swir2_weight, swir2_s)
print ('Smoothed array shape', nir_smoothed.shape,swir1_smoothed.shape,swir2_smoothed.shape)
nir_swir_gap = interpolate_gaps(nir_smoothed,swir1_smoothed)
swir2_gap = interpolate_gaps(swir1_smoothed,swir2_smoothed)
spec_smoothed = np.vstack((nir_smoothed,
nir_swir_gap,
swir1_smoothed,
swir2_gap,
swir2_smoothed))
print ('Spec SHAPE:', spec.shape)
survey_dir = os.path.join(output_dir, directory)
if not os.path.exists(survey_dir):
os.mkdir(survey_dir)
os.chdir(survey_dir)
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
print (abs470.abs_feature()[0])
abs470_ftdef = abs470.abs_feature()[0]
print (abs470_ftdef)
abs470_crem = abs470.abs_feature()[2]
if not abs470_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs470_ftdef.txt',
abs470_ftdef,
header=abs470.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs470_crem.txt',
abs470_crem,
delimiter=',')
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
if not abs670_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs670_ftdef.txt',
abs670_ftdef,
header=abs670.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs670_crem.txt',
abs670_crem,
delimiter=',')
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1115,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
if not abs970_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs970_ftdef.txt',
abs970_ftdef,
header=abs970.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs970_crem.txt',
abs970_crem,
delimiter=',')
except:
pass
try:
abs1200 = absorption_feature(spec_smoothed,1080,1300,1190)
abs1200_ftdef = abs1200.abs_feature()[0]
abs1200_crem = abs1200.abs_feature()[2]
if not abs1200_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs1200_ftdef.txt',
abs1200_ftdef,
header=abs1200.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1200_crem.txt',
abs1200_crem,
delimiter=',')
except:
pass
try:
abs1730 = absorption_feature(spec_smoothed,1630,1790,1708)
abs1730_ftdef = abs1730.abs_feature()[0]
abs1730_crem = abs1730.abs_feature()[2]
if not abs1730_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs1730_ftdef.txt',
abs1730_ftdef,
header=abs1730.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1730_crem.txt',
abs1730_crem,
delimiter=',')
except:
pass
print (spec_smoothed.shape)
try:
abs2100 = absorption_feature(spec_smoothed,2001,2196,2188)
abs2100_ftdef = abs2100.abs_feature()[0]
abs2100_crem = abs2100.abs_feature()[2]
if not abs2100_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs2100_ftdef.txt',
                                   abs2100_ftdef,
header=abs2100.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs2100_crem.txt',
abs2100_crem,
delimiter=',')
except:
pass
veg_indices = Indices(spec_smoothed)
indices = np.column_stack((veg_indices.visnir()[0],
veg_indices.nir_swir()[0],
veg_indices.swir()[0]))
print (veg_indices.visnir()[1],veg_indices.nir_swir()[1],veg_indices.swir()[1])
hdr = str(veg_indices.visnir()[1]+','+veg_indices.nir_swir()[1]+','+veg_indices.swir()[1])
np.savetxt(reading_filename[0:-4]+'_indices.txt',
indices,
header=hdr,
delimiter=',')
mtbvi = veg_indices.multi_tbvi()
np.savetxt(reading_filename[0:-4]+'_mtbvi.txt',
mtbvi,
delimiter=',')
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
print (redge.redge_vals)
np.savetxt(reading_filename[0:-4]+'_redge.txt',
redge.redge_vals,
delimiter=',')
fluo = fluorescence(spec_smoothed)
np.savetxt(reading_filename[0:-4]+'_flou.txt',
np.transpose(fluo.simple_ratios()[0]),
header = fluo.simple_ratios()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_spec.txt',
spec_smoothed,
delimiter=',')
class load_image():
    def __init__(self, wavelengths_dir, image_dir, out_dir):
os.chdir(wavelengths_dir)
wavelengths = np.genfromtxt('wavelengths.txt')
print ('wavelengths array', wavelengths)
os.chdir(image_dir)
image_list = os.listdir(image_dir)
for image in image_list:
import_image = self.get_image(image)
image_name = image[:-4]
print ('IMAGE NAME:', image_name)
row = 1
img_array = import_image[0]
print ('Image_array', img_array)
projection = import_image[1]
print ('Projection',projection)
x_size = import_image[2]
print ('Xdim',x_size)
y_size = import_image[3]
print ('Ydim', y_size)
spatial = import_image[4]
print (spatial)
x_top_left = spatial[0]
ew_pix_size = spatial[1]
rotation_ew = spatial[2]
y_top_left = spatial[3]
rotation_y = spatial[4]
ns_pixel_size = spatial[5]
print ('Spatial', x_top_left,ew_pix_size,rotation_ew,y_top_left,rotation_y,ns_pixel_size)
print ('IMAGE ARRAY SHAPE',img_array.shape)
img_dims = img_array.shape
print (img_dims[0],'/',img_dims[1])
#indices+29
indices_out = np.zeros((img_dims[0],img_dims[1],29), dtype=np.float32)
#print indices_out
#redge=3
redge_out = np.zeros((img_dims[0],img_dims[1]),dtype=np.float32)
#fluo=14
fluo_out=np.zeros((img_dims[0],img_dims[1],14), dtype=np.float32)
print ('fluo out', fluo_out.shape)
ft470_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft670_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft970_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
x470 = np.argmin(np.abs(wavelengths-400))
y470 = np.argmin(np.abs(wavelengths-518))
len470 = y470-x470
cr470_out = np.zeros((img_dims[0],img_dims[1],len470), dtype=np.float32)
x670 = np.argmin(np.abs(wavelengths-548))
y670 = np.argmin(np.abs(wavelengths-800))
len670 = y670-x670
cr670_out = np.zeros((img_dims[0],img_dims[1],len670), dtype=np.float32)
print (cr670_out)
x970 = np.argmin(np.abs(wavelengths-880))
y970 = np.argmin(np.abs(wavelengths-1000))
len970 = y970-x970
cr970_out = np.zeros((img_dims[0],img_dims[1],len970), dtype=np.float32)
#print cr970_out
print (wavelengths)
row = 0
print ('***', row, img_dims[0])
for i in range(0,img_dims[0]):
print (i)
column = 0
#print 'COL',column
for j in range(0,img_dims[1]):
print ('COLUMN',column)
#print 'Pixel',pixel
name = '%s_pix-%s_%s' % (image_name,row,column)
print ('NAME',name)
pixel = img_array[row,column,:]
#smoothed = savgol_filter(pixel,5,2)
#spec_smoothed = np.column_stack((wavelengths,smoothed))
spec_smoothed = np.column_stack((wavelengths,pixel))
print (spec_smoothed)
veg_indices = Indices(spec_smoothed)
indices = veg_indices.visnir()[0]
print ('(*&)(*)(*&&^)^)^)*&^)*^)*&', indices)
indices_out[row,column,:]=indices
fluo = fluorescence(spec_smoothed)
fluo_out[row,column,:]=np.transpose(fluo.simple_ratios()[0])
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
redge_out[row,column]= redge.redge_vals[0,2]
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
abs470_ftdef = abs470.abs_feature()[0]
abs470_crem = abs470.abs_feature()[2]
abs470_crem = np.column_stack((abs470_crem[:,0],abs470_crem[:,4]))
print ('!*!*!*!*!&!*!*', abs470_crem)
crem470_fill = self.crem_fill(x470,y470,abs470_crem,wavelengths)
ft470_out[row,column,:]=abs470_ftdef
cr470_out[row,column,:]=crem470_fill
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
abs670_crem = np.column_stack((abs670_crem[:,0],abs670_crem[:,4]))
ft670_out[row,column,:]=abs670_ftdef
crem670_fill = self.crem_fill(x670,y670,abs670_crem,wavelengths)
cr670_out[row,column,:]=crem670_fill
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1000,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
abs970_crem = np.column_stack((abs970_crem[:,0],abs970_crem[:,4]))
crem970_fill = self.crem_fill(x970,y970,abs970_crem,wavelengths)
ft970_out[row,column,:]=abs970_ftdef
cr970_out[row,column,:]=crem970_fill
except:
pass
column = column+1
print (pixel.shape)
row = row+1
self.writeimage(out_dir,image+'_indices.tif',indices_out,spatial)
self.writeimage(out_dir,image+'_fluo.tif',fluo_out,spatial)
self.writeimage(out_dir,image+'_redge.tif',redge_out,spatial)
self.writeimage(out_dir,image+'_ft470.tif',ft470_out,spatial)
self.writeimage(out_dir,image+'_cr470.tif',cr470_out,spatial)
self.writeimage(out_dir,image+'_ft670.tif',ft670_out,spatial)
self.writeimage(out_dir,image+'_cr670.tif',cr670_out,spatial)
self.writeimage(out_dir,image+'_ft970.tif',ft970_out,spatial)
self.writeimage(out_dir,image+'_cr970.tif',cr970_out,spatial)
def crem_fill(self,xwl,ywl,bna,wavelengths):
bna_out=np.zeros((ywl-xwl))
bna_wvl = bna[:,0]
bna_refl= bna[:,1]
full_wl = wavelengths[xwl:ywl]
index = np.argmin(np.abs(wavelengths-bna_wvl[0]))
bna_out[index:]=bna_refl
return bna_out
def get_image(self, image):
print ('call to get_image')
# open the dataset
dataset = gdal.Open(image, GA_ReadOnly)
print ('Dataset',dataset)
        # if there's nothing there print an error
if dataset is None:
print ('BORK: Could not load file: %s' %(image))
# otherwise do stuff
else:
#get the format
driver = dataset.GetDriver().ShortName
#get the x dimension
xsize = dataset.RasterXSize
#get the y dimension
ysize = dataset.RasterYSize
#get the projection
proj = dataset.GetProjection()
#get the number of bands
bands = dataset.RasterCount
#get the geotransform Returns a list object. This is standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
spatial = dataset.GetGeoTransform()
#print some stuff to console to show we're paying attention
print ('Found raster in %s format. Raster has %s bands' %(driver,bands))
print ('Projected as %s' %(proj))
print ('Dimensions: %s x %s' %(xsize,ysize))
#instantiate a counter
count = 1
            #OK. This is the bit that actually loads the bands in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
#show that your computer's fans are whining for a reason
print ('Loading band: %s of %s' %(count,bands))
#get the band
band = dataset.GetRasterBand(count)
# load this as a numpy array
data_array = band.ReadAsArray()
'''data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)'''
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
stacked = data_array
#else combine these
else:
stacked = np.dstack((stacked,data_array))
#stacked = stacked.filled(-999)
#just to check it's working
#print stacked.shape
# increment the counter
count = count+1
#stacked = stacked.astype(np.float32, copy=False)
return stacked,proj,xsize,ysize,spatial
def writeimage(self,
outpath,
outname,
image,
spatial):
data_out = image
print ('ROWS,COLS',image.shape)
print ('Call to write image')
os.chdir(outpath)
print ('OUTPATH',outpath)
print ('OUTNAME',outname)
#load the driver for the format of choice
driver = gdal.GetDriverByName("Gtiff")
#create an empty output file
#get the number of bands we'll need
try:
bands = image.shape[2]
except:
bands=1
print ('BANDS OUT', bands)
#file name, x columns, y columns, bands, dtype
out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32)
#define the location using coords of top-left corner
# minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
out.SetGeoTransform(spatial)
srs = osr.SpatialReference()
        #get the coordinate system using the EPSG code
        srs.ImportFromEPSG(27700)
        #set projection of output file
out.SetProjection(srs.ExportToWkt())
band = 1
if bands == 1:
out.GetRasterBand(band).WriteArray(data_out)
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
            #append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
else:
while (band<=bands):
data = data_out[:,:,band-1]
#write values to empty array
out.GetRasterBand(band).WriteArray( data )
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
                #append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
band = band+1
out = None
print ('Processing of %s complete' % (outname))
return outname
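# Illustrative sketch of how the six GDAL geotransform values documented in
# get_image() map a pixel (row, col) to map coordinates. The geotransform
# values below are invented; the arithmetic is the standard GDAL affine
# transform used implicitly by writeimage() when it calls SetGeoTransform().
def _pixel_to_map_demo(row=10, col=20):
    x_top_left, ew_pix, rot_x, y_top_left, rot_y, ns_pix = (400000.0, 2.0, 0.0,
                                                            120000.0, 0.0, -2.0)
    map_x = x_top_left + col * ew_pix + row * rot_x
    map_y = y_top_left + col * rot_y + row * ns_pix
    return map_x, map_y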
if __name__ == "__main__":
#dir_path = os.path.dirname(os.path.abspath('...'))
#data_root = os.path.join(dir_path, 'data')
data_root = '/home/dav/data/temp/test/test_spec'
for folder in os.listdir(data_root):
input_dir = os.path.join(data_root,folder)
print (input_dir)
surveys_list = os.listdir(input_dir)
print (surveys_list)
for survey_dir in surveys_list:
print (survey_dir)
site_dir=os.path.join(input_dir,survey_dir)
print (site_dir)
image_path = os.path.join(site_dir, 'image')
print (image_path)
wavelengths_dir = os.path.join(site_dir, 'wavelengths')
print (wavelengths_dir)
out_dir = os.path.join(site_dir,'output')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
load_image(wavelengths_dir,image_path,out_dir) | mit |
rsheftel/pandas_market_calendars | tests/test_bse_calendar.py | 1 | 1192 | import datetime
import pandas as pd
import pytz
from pandas_market_calendars.exchange_calendar_bse import BSEExchangeCalendar, BSEClosedDay
def test_time_zone():
assert BSEExchangeCalendar().tz == pytz.timezone('Asia/Calcutta')
assert BSEExchangeCalendar().name == 'BSE'
def test_holidays():
bse_calendar = BSEExchangeCalendar()
trading_days = bse_calendar.valid_days(pd.Timestamp('2004-01-01'), pd.Timestamp('2018-12-31'))
for session_label in BSEClosedDay:
assert session_label not in trading_days
def test_open_close_time():
bse_calendar = BSEExchangeCalendar()
india_time_zone = pytz.timezone('Asia/Calcutta')
bse_schedule = bse_calendar.schedule(
start_date=india_time_zone.localize(datetime.datetime(2015, 1, 14)),
end_date=india_time_zone.localize(datetime.datetime(2015, 1, 16))
)
assert BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 14, 11, 0))
)
assert not BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 9, 12, 0))
)
| mit |
edonyM/emthesis | code/3point2plane.py | 1 | 3545 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-11-30 16:04
#
# Filename: 3point2plane.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
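# Illustrative sketch only: it shows how the ANSI escape codes documented in
# PyColor.self_doc are combined into a highlighted string. The function is
# hypothetical and is not called anywhere in this script.
def _ansi_demo(text="warning"):
    # 1 = highlight, 31 = red foreground, 40 = black background
    return "\033[1;31;40m" + text + "\033[0m"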
fig = plt.figure('3 point into plane')
ax = fig.gca(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = 5 - 0.3*X + 0.48*Y
p1 = [5.3, 0.1, 5-0.3*5.3+0.48*0.1]
p2 = [2.3, 0.7, 5-0.3*2.3+0.48*0.7]
p3 = [8.3, 3.1, 5-0.3*8.3+0.48*3.1]
ax.plot_surface(X, Y, Z, rstride=100, cstride=100, alpha=0.3)
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.scatter(p3[0], p3[1], p3[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
loganlinn/mlia | resources/Ch10/kMeans.py | 3 | 6419 | '''
Created on Feb 16, 2011
k Means Clustering for Ch10 of Machine Learning in Action
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName): #general function to parse tab -delimited floats
dataMat = [] #assume last column is target value
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = map(float,curLine) #map all elements to float()
dataMat.append(fltLine)
return dataMat
def distEclud(vecA, vecB):
return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB)
def randCent(dataSet, k):
n = shape(dataSet)[1]
centroids = mat(zeros((k,n)))#create centroid mat
for j in range(n):#create random cluster centers, within bounds of each dimension
minJ = min(dataSet[:,j])
rangeJ = float(max(dataSet[:,j]) - minJ)
centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1))
return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))#create mat to assign data points
#to a centroid, also holds SE of each point
centroids = createCent(dataSet, k)
clusterChanged = True
while clusterChanged:
clusterChanged = False
for i in range(m):#for each data point assign it to the closest centroid
minDist = inf; minIndex = -1
for j in range(k):
distJI = distMeas(centroids[j,:],dataSet[i,:])
if distJI < minDist:
minDist = distJI; minIndex = j
if clusterAssment[i,0] != minIndex: clusterChanged = True
clusterAssment[i,:] = minIndex,minDist**2
print centroids
for cent in range(k):#recalculate centroids
ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]#get all the point in this cluster
centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean
return centroids, clusterAssment
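# Illustrative sketch (not part of the book code): it builds a tiny synthetic
# dataset and shows how kMeans() above would typically be invoked. The data
# and the choice of k are arbitrary.
def _kmeans_demo():
    dataMat = mat(random.rand(40, 2)) # 40 random 2-D points
    centroids, clusterAssment = kMeans(dataMat, 3)
    return centroids, clusterAssment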
def biKmeans(dataSet, k, distMeas=distEclud):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))
centroid0 = mean(dataSet, axis=0).tolist()[0]
centList =[centroid0] #create a list with one centroid
for j in range(m):#calc initial Error
clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
while (len(centList) < k):
lowestSSE = inf
for i in range(len(centList)):
ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:,1])#compare the SSE to the current minimum
sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
print 'the bestCentToSplit is: ',bestCentToSplit
print 'the len of bestClustAss is: ', len(bestClustAss)
centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids
centList.append(bestNewCents[1,:].tolist()[0])
clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
return mat(centList), clusterAssment
import urllib
import json
def geoGrab(stAddress, city):
apiStem = 'http://where.yahooapis.com/geocode?' #create a dict and constants for the goecoder
params = {}
params['flags'] = 'J'#JSON return type
params['appid'] = 'aaa0VN6k'
params['location'] = '%s %s' % (stAddress, city)
url_params = urllib.urlencode(params)
yahooApi = apiStem + url_params #print url_params
print yahooApi
c=urllib.urlopen(yahooApi)
return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
fw = open('places.txt', 'w')
for line in open(fileName).readlines():
line = line.strip()
lineArr = line.split('\t')
retDict = geoGrab(lineArr[1], lineArr[2])
if retDict['ResultSet']['Error'] == 0:
lat = float(retDict['ResultSet']['Results'][0]['latitude'])
lng = float(retDict['ResultSet']['Results'][0]['longitude'])
print "%s\t%f\t%f" % (lineArr[0], lat, lng)
fw.write('%s\t%f\t%f\n' % (line, lat, lng))
else: print "error fetching"
sleep(1)
fw.close()
def distSLC(vecA, vecB):#Spherical Law of Cosines
a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180)
b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \
cos(pi * (vecB[0,0]-vecA[0,0]) /180)
return arccos(a + b)*6371.0 #pi is imported with numpy
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
datList = []
for line in open('places.txt').readlines():
lineArr = line.split('\t')
datList.append([float(lineArr[4]), float(lineArr[3])])
datMat = mat(datList)
myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
fig = plt.figure()
rect=[0.1,0.1,0.8,0.8]
scatterMarkers=['s', 'o', '^', '8', 'p', \
'd', 'v', 'h', '>', '<']
axprops = dict(xticks=[], yticks=[])
ax0=fig.add_axes(rect, label='ax0', **axprops)
imgP = plt.imread('Portland.png')
ax0.imshow(imgP)
ax1=fig.add_axes(rect, label='ax1', frameon=False)
for i in range(numClust):
ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
markerStyle = scatterMarkers[i % len(scatterMarkers)]
ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)
plt.show()
| epl-1.0 |
eric-haibin-lin/mxnet | example/named_entity_recognition/src/ner.py | 4 | 12663 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from collections import Counter
import itertools
import iterators
import os
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import pickle
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for named entity recognition",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--output-dir', type=str, default='../results',
help='directory to save model files to')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--train_fraction', type=float, default=0.8,
help='fraction of data to use for training. remainder used for testing.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--buckets', type=str, default="",
help='unique bucket sizes')
parser.add_argument('--char-embed', type=int, default=25,
help='Embedding size for each unique character.')
parser.add_argument('--char-filter-list', type=str, default="3,4,5",
help='unique filter sizes for char level cnn')
parser.add_argument('--char-filters', type=int, default=20,
help='number of each filter size')
parser.add_argument('--word-embed', type=int, default=500,
help='Embedding size for each unique character.')
parser.add_argument('--word-filter-list', type=str, default="3,4,5",
help='unique filter sizes for char level cnn')
parser.add_argument('--word-filters', type=int, default=200,
help='number of each filter size')
parser.add_argument('--lstm-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--lstm-layers', type=int, default=1,
help='number of recurrent layers')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def save_model():
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
return mx.callback.do_checkpoint(os.path.join(args.output_dir, "checkpoint"), args.save_period)
def build_vocab(nested_list):
"""
:param nested_list: list of list of string
:return: dictionary mapping from string to int, inverse of that dictionary
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*nested_list))
logging.info("build_vocab: word_counts=%d" % (len(word_counts)))
# Mapping from index to label
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from label to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return vocabulary, vocabulary_inv
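# Illustrative sketch: a tiny, self-contained example of what build_vocab()
# above returns for a toy token list. The sentences are invented for the
# example only and the helper is not called by the training pipeline.
def _build_vocab_demo():
    toy_sentences = [["London", "is", "a", "city"], ["Paris", "is", "a", "city"]]
    vocabulary, vocabulary_inv = build_vocab(toy_sentences)
    # the most frequent tokens ("is", "a", "city") receive the smallest indices
    return vocabulary, vocabulary_inv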
def build_iters(data_dir, max_records, train_fraction, batch_size, buckets=None):
"""
Reads a csv of sentences/tag sequences into a pandas dataframe.
Converts into X = array(list(int)) & Y = array(list(int))
Splits into training and test sets
Builds dictionaries mapping from index labels to labels/ indexed features to features
:param data_dir: directory to read in csv data from
:param max_records: total number of records to randomly select from input data
:param train_fraction: fraction of the data to use for training
:param batch_size: records in mini-batches during training
:param buckets: size of each bucket in the iterators
:return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos
"""
# Read in data as numpy array
df = pd.read_pickle(os.path.join(data_dir, "ner_data.pkl"))[:max_records]
# Get feature lists
entities=[list(array) for array in df["BILOU_tag"].values]
sentences = [list(array) for array in df["token"].values]
chars=[[[c for c in word] for word in sentence] for sentence in sentences]
# Build vocabularies
entity_to_index, index_to_entity = build_vocab(entities)
word_to_index, index_to_word = build_vocab(sentences)
char_to_index, index_to_char = build_vocab([np.array([c for c in word]) for word in index_to_word])
save_obj(entity_to_index, os.path.join(args.data_dir, "tag_to_index"))
# Map strings to integer values
indexed_entities=[list(map(entity_to_index.get, l)) for l in entities]
indexed_tokens=[list(map(word_to_index.get, l)) for l in sentences]
indexed_chars=[[list(map(char_to_index.get, word)) for word in sentence] for sentence in chars]
# Split into training and testing data
idx=int(len(indexed_tokens)*train_fraction)
logging.info("Preparing train/test datasets splitting at idx %d on total %d sentences using a batchsize of %d", idx, len(indexed_tokens), batch_size)
X_token_train, X_char_train, Y_train = indexed_tokens[:idx], indexed_chars[:idx], indexed_entities[:idx]
X_token_test, X_char_test, Y_test = indexed_tokens[idx:], indexed_chars[idx:], indexed_entities[idx:]
# build iterators to feed batches to network
train_iter = iterators.BucketNerIter(sentences=X_token_train, characters=X_char_train, label=Y_train,
max_token_chars=5, batch_size=batch_size, buckets=buckets)
logging.info("Creating the val_iter using %d sentences", len(X_token_test))
val_iter = iterators.BucketNerIter(sentences=X_token_test, characters=X_char_test, label=Y_test,
max_token_chars=train_iter.max_token_chars, batch_size=batch_size, buckets=train_iter.buckets)
return train_iter, val_iter, word_to_index, char_to_index, entity_to_index
def sym_gen(seq_len):
"""
Build NN symbol depending on the length of the input sequence
"""
sentence_shape = train_iter.provide_data[0][1]
char_sentence_shape = train_iter.provide_data[1][1]
entities_shape = train_iter.provide_label[0][1]
X_sent = mx.symbol.Variable(train_iter.provide_data[0].name)
X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
###############################
# Character embedding component
###############################
char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed')
char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2')
char_cnn_outputs = []
for i, filter_size in enumerate(args.char_filter_list):
# Kernel that slides over entire words resulting in a 1d output
convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1),
num_filter=args.char_filters, name="char_conv_layer_" + str(i))
acti = mx.sym.Activation(data=convi, act_type='tanh')
pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1),
stride=(1, 1, 1), name="char_pool_layer_" + str(i))
pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i))
char_cnn_outputs.append(pooli)
# combine features from all filters & apply dropout
cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features")
regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training',
name='regularized charCnn features')
##################################
# Combine char and word embeddings
##################################
word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed')
rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input')
##############################
# Bidirectional LSTM component
##############################
# unroll the lstm cell in time, merging outputs
bi_cell.reset()
output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True)
# Map to num entity classes
rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output')
fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer')
# reshape back to same shape as loss will be
reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1))
sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax')
return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(train_iter, val_iter):
import metrics
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
    logging.info("train on device %s using optimizer %s at learning rate %f for %d epochs using %d records: lstm_state_size=%d ...",
devs, args.optimizer, args.lr, args.num_epochs, args.max_records, args.lstm_state_size)
module = mx.mod.BucketingModule(sym_gen, train_iter.default_bucket_key, context=devs)
module.fit(train_data=train_iter,
eval_data=val_iter,
eval_metric=metrics.composite_classifier_metrics(),
optimizer=args.optimizer,
optimizer_params={'learning_rate': args.lr },
initializer=mx.initializer.Uniform(0.1),
num_epoch=args.num_epochs,
epoch_end_callback=save_model())
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.buckets = list(map(int, args.buckets.split(','))) if len(args.buckets) > 0 else None
args.char_filter_list = list(map(int, args.char_filter_list.split(',')))
# Build data iterators
train_iter, val_iter, word_to_index, char_to_index, entity_to_index = build_iters(args.data_dir, args.max_records,
args.train_fraction, args.batch_size, args.buckets)
logging.info("validation iterator: %s", val_iter)
# Define the recurrent layer
bi_cell = mx.rnn.SequentialRNNCell()
for layer_num in range(args.lstm_layers):
bi_cell.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="forward_layer_" + str(layer_num)),
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="backward_layer_" + str(layer_num))))
bi_cell.add(mx.rnn.DropoutCell(args.dropout))
train(train_iter, val_iter) | apache-2.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/experiment.py | 4 | 15233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
__all__ = ["Experiment"]
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
@deprecated_arg_values(
"2016-10-23",
"local_eval_frequency is deprecated as local_run will be renamed to "
"train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
"instead. Note, however, that the default for min_eval_frequency is 1, "
"meaning models will be evaluated every time a new checkpoint is "
"available. In contrast, the default for local_eval_frequency is None, "
"resulting in evaluation occurring only after training has completed. "
"min_eval_frequency is ignored when calling the deprecated local_run.",
local_eval_frequency=None)
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None,
local_eval_frequency=None,
eval_delay_secs=120,
continuous_eval_throttle_secs=60,
min_eval_frequency=1):
"""Constructor for `Experiment`.
Creates an Experiment instance. None of the functions passed to this
constructor are executed at construction time. They are stored and used
when a method is executed which requires it.
Args:
estimator: Object implementing `Trainable` and `Evaluable`.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
        `eval_steps` is `None`, this should be configured to produce only a
        finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
local_eval_frequency: Frequency of running eval in steps,
when running locally. If `None`, runs evaluation only at the end of
training.
eval_delay_secs: Start evaluating after waiting for this many seconds.
continuous_eval_throttle_secs: Do not re-evaluate unless the last
evaluation was started at least this many seconds ago for
continuous_eval().
      min_eval_frequency: (applies only to train_and_evaluate). The minimum
number of steps between evaluations. Of course, evaluation does not
occur if no new snapshot is available, hence, this is the minimum.
Raises:
ValueError: if `estimator` does not implement `Evaluable` and `Trainable`.
"""
if not isinstance(estimator, evaluable.Evaluable):
raise ValueError("`estimator` must implement `Evaluable`.")
if not isinstance(estimator, trainable.Trainable):
raise ValueError("`estimator` must implement `Trainable`.")
super(Experiment, self).__init__()
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._train_monitors = train_monitors
self._local_eval_frequency = local_eval_frequency
self._eval_delay_secs = eval_delay_secs
self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
self._min_eval_frequency = min_eval_frequency
@property
def estimator(self):
return self._estimator
def train(self, delay_secs=None):
"""Fit the estimator using the training data.
Train the estimator for `self._train_steps` steps, after waiting for
`delay_secs` seconds. If `self._train_steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
start = time.time()
# Start the server, if needed. It's important to start the server before
# we (optionally) sleep for the case where no device_filters are set.
# Otherwise, the servers will wait to connect to each other before starting
# to train. We might as well start as soon as we can.
if self._estimator.config.cluster_spec and self._estimator.config.master:
self._start_server()
if delay_secs is None:
task_id = self._estimator.config.task or 0
delay_secs = min(60, task_id * 5)
if delay_secs:
elapsed_secs = time.time() - start
remaining = delay_secs - elapsed_secs
logging.info("Waiting %d secs before starting training.", remaining)
time.sleep(delay_secs)
return self._estimator.fit(input_fn=self._train_input_fn,
max_steps=self._train_steps,
monitors=self._train_monitors)
def evaluate(self, delay_secs=None):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. Runs for
`self._eval_steps` steps, or if it's `None`, then run until input is
exhausted or another exception is raised. Start the evaluation after
`delay_secs` seconds, or if it's `None`, defaults to using
`self._eval_delay_secs` seconds.
Args:
delay_secs: Start evaluating after this many seconds. If `None`, defaults
        to using `self._eval_delay_secs`.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass")
@deprecated(
"2016-10-23",
"local_run will be renamed to train_and_evaluate and the new default "
"behavior will be to run evaluation every time there is a new "
"checkpoint.")
def local_run(self):
with _new_attr_context(self, "_min_eval_frequency"):
self._min_eval_frequency = self._local_eval_frequency
return self.train_and_evaluate()
def _continuous_eval(self,
input_fn,
name,
delay_secs,
throttle_delay_secs):
"""Run continuous eval.
Runs infinite eval on the evaluation data set. This function starts
evaluating after `delay_secs` seconds and then runs no more than one
evaluation (with `self._eval_steps` steps each time) per
`throttle_delay_secs`. It never returns.
Args:
input_fn: The input to use for this eval.
name: A string appended to the folder name of evaluation results.
delay_secs: Start evaluating after this many seconds. If None, defaults to
self._eval_delay_secs.
throttle_delay_secs: Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. If None, defaults to
self._continuous_eval_throttle_secs.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if throttle_delay_secs is None:
throttle_delay_secs = self._continuous_eval_throttle_secs
if delay_secs:
logging.info("Waiting %f secs before starting eval.", delay_secs)
time.sleep(delay_secs)
last_fitted_error_time = 0
while True:
start = time.time()
try:
self._estimator.evaluate(input_fn=input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=name)
except NotFittedError:
# Print warning message every 10 mins.
if time.time() - last_fitted_error_time > 600:
logging.warning(
"Estimator is not fitted yet. "
"Will start an evaluation when a checkpoint will be ready.")
last_fitted_error_time = time.time()
duration = time.time() - start
if duration < throttle_delay_secs:
difference = throttle_delay_secs - duration
logging.info("Waiting %f secs before starting next eval run.",
difference)
time.sleep(difference)
def continuous_eval(self, delay_secs=None, throttle_delay_secs=None):
self._continuous_eval(self._eval_input_fn,
name="continuous",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def continuous_eval_on_train_data(self,
delay_secs=None,
throttle_delay_secs=None):
self._continuous_eval(self._train_input_fn,
name="continuous_on_train_data",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def train_and_evaluate(self):
"""Interleaves training and evaluation.
    The frequency of evaluation is controlled by the constructor arg
`min_eval_frequency`. When this parameter is None or 0, evaluation happens
only after training has completed. Note that evaluation cannot happen
more frequently than checkpoints are taken. If no new snapshots are
available when evaluation is supposed to occur, then evaluation doesn't
happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, setting `min_eval_frequency` to 1 means
    that the model will be evaluated every time there is a new checkpoint.
    This is particularly useful for a "Master" task in the cloud, whose
responsibility it is to take checkpoints, evaluate those checkpoints,
and write out summaries. Participating in training as the supervisor
allows such a task to accomplish the first and last items, while
performing evaluation allows for the second.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
    # The directory to which evaluation summaries are written is determined
# by adding a suffix to 'eval'; that suffix is the 'name' parameter to
# the various evaluate(...) methods. By setting it to None, we force
# the directory name to simply be 'eval'.
eval_dir_suffix = None
# We set every_n_steps to 1, but evaluation only occurs when a new
# snapshot is available. If, by the time we finish evaluation
# there is a new snapshot, then we just evaluate again. Otherwise,
# we keep training until one becomes available.
with _new_attr_context(self, "_train_monitors"):
self._train_monitors = self._train_monitors or []
if self._min_eval_frequency:
self._train_monitors += [monitors.ValidationMonitor(
input_fn=self._eval_input_fn, eval_steps=self._eval_steps,
metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency,
name=eval_dir_suffix,
)]
self.train(delay_secs=0)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=eval_dir_suffix)
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
self._start_server().join()
def test(self):
"""Tests training and evaluating the estimator both for a single step.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
self._estimator.fit(input_fn=self._train_input_fn,
steps=1,
monitors=self._train_monitors)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=1,
metrics=self._eval_metrics,
name="one_pass")
def _start_server(self):
"""Creates, starts, and returns a server_lib.Server."""
config = self._estimator.config
if (not config.cluster_spec or not config.job_name or not config.master or
config.task is None):
raise ValueError("Could not start server; be sure to specify "
"cluster_spec, job_name, master, and task in "
"RunConfig or set the TF_CONFIG environment variable.")
server = server_lib.Server(
config.cluster_spec,
job_name=config.job_name,
task_index=config.task,
config=config.tf_config,
start=False)
server.start()
return server
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Example usage:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
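# A hedged usage sketch of the univariate selectors exported above (synthetic
# data; not part of this package):
#
#   from sklearn.datasets import load_iris
#   from sklearn.feature_selection import SelectKBest, chi2
#   X, y = load_iris(return_X_y=True)
#   X_new = SelectKBest(chi2, k=2).fit_transform(X, y)  # keep the 2 best-scoring features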
| bsd-3-clause |
ifuding/Kaggle | PMRCN/Code/siamese_net.py | 1 | 22230 |
from sklearn import *
import sklearn
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
from time import gmtime, strftime
import numpy.random as rng
from multiprocessing.dummy import Pool
import h5py
import concurrent.futures
import tensorflow as tf
import multiprocessing as mp
import sys
from sklearn.cross_validation import KFold
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Input, concatenate, merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
graph = tf.get_default_graph()
HIDDEN_UNITS = [64, 16, 8]
DNN_EPOCHS = 40
BATCH_SIZE = 5
DNN_BN = True
DROPOUT_RATE = 0.5
SIAMESE_PAIR_SIZE = 100000
MAX_WORKERS = 8
EMBEDDING_SIZE = 6
full_feature = True
data_folder = '../Data/'
train = pd.read_csv(data_folder + 'training_variants')
#print train.dtypes
test = pd.read_csv(data_folder + 'test_variants')
trainx = pd.read_csv(data_folder + 'training_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
#print trainx.dtypes
testx = pd.read_csv(data_folder + 'test_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
train = pd.merge(train, trainx, how='left', on='ID').fillna('')
#train = train.iloc[1:1000]
y = train['Class'].values
train = train.drop(['Class'], axis=1)
test = pd.merge(test, testx, how='left', on='ID').fillna('')
pid = test['ID'].values
#df_all = pd.concat((train, test), axis=0, ignore_index=True)
#df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#
#print df_all[['Gene_Share', 'Variation_Share']].max()
## exit(0)
#if full_feature:
# #commented for Kaggle Limits
# for i in range(5):
# df_all['Gene_'+str(i)] = df_all['Gene'].map(lambda x: str(x[i]) if len(x)>i else '')
# df_all['Variation'+str(i)] = df_all['Variation'].map(lambda x: str(x[i]) if len(x)>i else '')
# print df_all.dtypes
#
# gen_var_lst = sorted(list(train.Gene.unique()) + list(train.Variation.unique()))
# print(len(gen_var_lst))
# gen_var_lst = [x for x in gen_var_lst if len(x.split(' '))==1]
# print(len(gen_var_lst))
# i_ = 0
# #commented for Kaggle Limits
# for gen_var_lst_itm in gen_var_lst:
# if i_ % 100 == 0: print(i_)
# df_all['GV_'+str(gen_var_lst_itm)] = df_all['Text'].map(lambda x: str(x).count(str(gen_var_lst_itm))).astype(np.int8)
# i_ += 1
# if i_ == 5:
# break
#
#for c in df_all.columns:
# if df_all[c].dtype == 'object':
# if c in ['Gene','Variation']:
# lbl = preprocessing.LabelEncoder()
# df_all[c+'_lbl_enc'] = lbl.fit_transform(df_all[c].values)
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
# elif c != 'Text':
# lbl = preprocessing.LabelEncoder()
# df_all[c] = lbl.fit_transform(df_all[c].values)
# if c=='Text':
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
#
#train = df_all.iloc[:len(train)]
#print "... train dtypes before svd ..."
#print train.dtypes
#print train.head()
#exit(0)
#test = df_all.iloc[len(train):]
#
#class cust_regression_vals(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# x = x.drop(['Gene', 'Variation','ID','Text'],axis=1).values
# return x
#
#class cust_txt_col(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def __init__(self, key):
# self.key = key
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# return x[self.key].apply(str)
#
#print('Pipeline...')
#fp = pipeline.Pipeline([
# ('union', pipeline.FeatureUnion(
# n_jobs = -1,
# transformer_list = [
# ('standard', cust_regression_vals()),
# ('pi1', pipeline.Pipeline([('Gene', cust_txt_col('Gene')), ('count_Gene', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd1', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# ('pi2', pipeline.Pipeline([('Variation', cust_txt_col('Variation')), ('count_Variation', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd2', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# #commented for Kaggle Limits
# ('pi3', pipeline.Pipeline([('Text', cust_txt_col('Text')), ('tfidf_Text', feature_extraction.text.TfidfVectorizer(ngram_range=(1, 2))), ('tsvd3', decomposition.TruncatedSVD(n_components=50, n_iter=25, random_state=12))]))
# ])
# )])
#
#train = fp.fit_transform(train);
#print type(train)
#print(train.shape)
#print (train.nbytes)
#np.save("train_array", train)
## print(df.dtypes)
## print(df.memory_usage())
#test = fp.transform(test); print(test.shape)
#np.save("test_array", test)
#exit(0)
train = np.load("./train_array.npy")
test = np.load("./test_array.npy")
# siamese_features_array = np.load("./siamese_features_array_2017_09_15_07_57_44.npy")
y = y - 1 #fix for zero bound array
CONTINUOUS_INDICES = []
SPARSE_INDICES = []
for i in range((train.shape)[1]):
if (i >= 3205 and i <= 3212):
pass
elif (i >= 2 and i <= 113): # or (i >= 114 and i <= 3204):
SPARSE_INDICES.append(i)
else:
CONTINUOUS_INDICES.append(i)
#train = train[:, CONTINUOUS_INDICES]
#test = test[:, CONTINUOUS_INDICES]
print('train shape after loading and selecting trainging columns: %s' % str(train.shape))
siamese_train_len = len(train) // 3
print('siamese_train_len is %d' % (siamese_train_len))
siamese_train_data = train[:siamese_train_len]
siamese_train_label = y[:siamese_train_len]
lgbm_train_data = train[siamese_train_len:]
lgbm_train_label = y[siamese_train_len:]
#train = train[:200]
#y = y[:200]
#test = test[:200]
#pid = pid[:200]
def xgbTrain(train_data, train_label, fold = 5):
"""
"""
denom = 0
fold = 5 #Change to 5, 1 for Kaggle Limits
models = []
for i in range(fold):
params = {
'eta': 0.03333,
'max_depth': 4,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'num_class': 9,
'seed': i,
'silent': True
}
x1, x2, y1, y2 = model_selection.train_test_split(train_data, train_label, test_size=0.18, random_state=i)
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
model = xgb.train(params, xgb.DMatrix(x1, y1), 1000, watchlist, verbose_eval=50, early_stopping_rounds=100)
score1 = metrics.log_loss(y2, model.predict(xgb.DMatrix(x2), ntree_limit=model.best_ntree_limit), labels = list(range(9)))
#print(score1)
models.append((model, 'x'))
return models
def lgbm_train(train_data, train_label, fold = 5):
"""
LGB Training
"""
# print train.shape
# print siamese_features_array.shape
# train_merge = siamese_features_array #np.concatenate((train, siamese_features_array), axis = 1)
# print train_merge.shape
# # exit(0)
print("Over all training size:")
print(train_data.shape)
# train_data = train_merge#[:train_len * 3 / 10]
# train_label = lgbm_train_label#[:train_len * 3 / 10]
#valide_data = train_merge[train_len * 9 / 10:]
#valide_label = y[train_len * 9 / 10:]
models = []
for i in range(fold):
d_train = lgb.Dataset(train_data, train_label) #, categorical_feature = SPARCE_INDICES)
#d_valide = lgb.Dataset(valide_data, valide_label)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'multiclass',
'metric': {'multi_logloss'},
'num_class': 9,
# 'num_leaves': 256,
# 'max_depth': 12,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.95,
# 'bagging_freq': 5,
'num_leaves': 60, # 60,
# 'min_sum_hessian_in_leaf': 20,
'max_depth': 10, # 10,
'learning_rate': 0.02, # 0.02,
'feature_fraction': 0.5,
'verbose': 0,
# 'valid_sets': [d_valide],
'num_boost_round': 327,
'feature_fraction_seed': i,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 15,
# 'bagging_seed': i,
# 'early_stopping_round': 10
# 'random_state': 10
# 'verbose_eval': 20
#'min_data_in_leaf': 665
}
# ROUNDS = 1
print('fold: %d th light GBM train :-)' % (i))
# params['feature_fraction_seed'] = i
#bst = lgb.train(
# params ,
# d_train,
# verbose_eval = False
# # valid_sets = [d_valide]
# #num_boost_round = 1
# )
cv_result = lgb.cv(params, d_train, nfold=10)
pd.DataFrame(cv_result).to_csv('cv_result', index = False)
exit(0)
# pred = model_eval(bst, 'l', test)
#print pred.shape
#print pred[0, :]
models.append((bst, 'l'))
return models
def create_model(input_len):
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_len))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
# model.add(Dropout(0.1))
#model.add(Dense(32, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(9, activation='softmax'))
# optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def create_embedding_model(CONTINUE_SIZE, SPARSE_SIZE):
"""
"""
print('CONTINUOUS_SIZE = %d' % CONTINUE_SIZE)
print('SPARSE_SIZE = %d' % SPARSE_SIZE)
sparse_feature = Input(shape=(SPARSE_SIZE,))
sparse_embedding = Embedding(55, EMBEDDING_SIZE, input_length = SPARSE_SIZE)(sparse_feature)
sparse_embedding = Reshape((EMBEDDING_SIZE * SPARSE_SIZE,))(sparse_embedding)
# print "model input size: %d" % CONTINUOUS_COLUMNS
dense_input = Input(shape=(CONTINUE_SIZE,))
merge_input = concatenate([dense_input, sparse_embedding], axis = 1)
merge_len = CONTINUE_SIZE + EMBEDDING_SIZE * SPARSE_SIZE
output = create_model(merge_len)(merge_input)
model = Model([dense_input, sparse_feature], output)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
# optimizer = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer = Adam(),
loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def keras_train(train_data, train_target, nfolds = 10):
"""
    Train the Keras embedding model with K-fold cross-validation
"""
print("Start gen training data, shuffle and normalize!")
#train_data = train
train_target = np_utils.to_categorical(train_target)
# train_data, train_target, siamese_data_loader = siamese_train(siamese_train_data, siamese_train_label)
kf = KFold(len(train_target), n_folds=nfolds, shuffle=True)
num_fold = 0
models = []
for train_index, test_index in kf:
# model = create_model(classes = 2)
model = create_embedding_model(len(CONTINUOUS_INDICES), len(SPARSE_INDICES))
# model = create_siamese_net((train.shape)[1])
X_train = train_data[train_index]
Y_train = train_target[train_index]
print('Positive samples in train: %d' % np.sum(Y_train))
print('Negative samples in train: %d' % (len(Y_train) - np.sum(Y_train)))
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
print('Positive samples in valide: %d' % np.sum(Y_valid))
print('Negative samples in valide: %d' % (len(Y_valid) - np.sum(Y_valid)))
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
callbacks = [
EarlyStopping(monitor='val_loss', patience=5, verbose=0),
]
model.fit([X_train[:, CONTINUOUS_INDICES], X_train[:, SPARSE_INDICES]],
Y_train, batch_size=BATCH_SIZE, epochs=DNN_EPOCHS,
shuffle=True, verbose=2,
validation_data=([X_valid[:, CONTINUOUS_INDICES], X_valid[:, SPARSE_INDICES]], Y_valid)
, callbacks=callbacks)
model_name = 'keras' + strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
#model.save_weights(model_name)
#siamese_features_array = gen_siamese_features(model, lgbm_train_data, siamese_train_data, siamese_train_label)
models.append((model, 'k'))
break
return models #, siamese_features_array
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k':
preds = model.predict(data_frame, batch_size=BATCH_SIZE, verbose=2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit+80)
return preds
def gen_sub(models, merge_features):
"""
Evaluate single Type model
"""
print('Start generate submission!')
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, merge_features)
#print pred.shape
#print pred[0, :]
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
submission = pd.DataFrame(preds, columns=['class'+str(c+1) for c in range(9)])
submission['ID'] = pid
sub_name = "submission" + strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) + ".csv"
print('Output to ' + sub_name)
submission.to_csv(sub_name, index=False)
def create_siamese_net(input_size):
"""
"""
input_shape = (input_size, )
left_input = Input(input_shape)
right_input = Input(input_shape)
#build model to use in each siamese 'leg'
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_size))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
#encode each of the two inputs into a vector with the convnet
encoded_l = model(left_input)
encoded_r = model(right_input)
#merge two encoded inputs with the l1 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l,encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
merge_layer = Dense(HIDDEN_UNITS[2],activation='sigmoid')(both)
prediction = Dense(1,activation='sigmoid')(merge_layer)
siamese_net = Model(input=[left_input,right_input],output=prediction)
#optimizer = SGD(0.0004,momentum=0.6,nesterov=True,decay=0.0003)
optimizer = Adam()
#//TODO: get layerwise learning rates and momentum annealing scheme described in paperworking
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
# print siamese_net.count_params()
return siamese_net
class Siamese_Loader:
#For loading batches and testing tasks to a siamese net
def __init__(self,Xtrain,Xval = None):
self.Xval = Xval
self.Xtrain = Xtrain
self.n_classes = Xtrain.shape[0]
self.feature_size = (Xtrain[0].shape)[1]
self.n_examples = np.array([x.shape[0] for x in Xtrain])
self.n_tot_examples = np.sum(self.n_examples)
print('examples of different classes: %s' % str(self.n_examples))
# self.n_val,self.n_ex_val,_,_ = Xval.shape
def get_batch(self,n):
#Create batch of pairs, half same class, half different class
categories = rng.choice(self.n_classes,size=(n,),replace=True)
pairs=np.zeros((2, n, self.feature_size))
targets=np.zeros((n,))
positive_begin_pos = n * 1 // 2
targets[positive_begin_pos:] = 1
categories_list = []
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0, self.n_examples[category])
pairs[0][i] = self.Xtrain[category][idx_1] #.reshape(self.feature_size)
            # pick a sample of the same class for the 2nd half (positive pairs), a different class for the 1st
category_2 = category if i >= positive_begin_pos else (category + rng.randint(1,self.n_classes)) % self.n_classes
idx_2 = rng.randint(0,self.n_examples[category_2])
while i >= positive_begin_pos and idx_2 == idx_1:
idx_2 = rng.randint(0,self.n_examples[category_2])
pairs[1][i] = self.Xtrain[category_2][idx_2] #.reshape(self.w,self.h,1)
categories_list.append((category, category_2))
#pd.DataFrame(categories_list).to_csv('categories', index=False)
#exit(0)
        # shuffle pairs and targets with the same permutation to mix positive and negative examples
        perm = rng.permutation(n)
        pairs = pairs[:, perm, :]
        targets = targets[perm]
return pairs, targets
def gen_test_on_support_data(Xsupport, Xtest):
"""
"""
n_support, feature_size = Xsupport.shape
pairs = np.zeros((2, n_support, feature_size))
pairs[0] = Xtest
pairs[1] = Xsupport
return list(pairs)
def siamese_train(siamese_train_data, siamese_train_label):
"""
"""
train_data = [[] for i in range(9)]
label_ind = 0
for feature in siamese_train_data:
train_data[siamese_train_label[label_ind]].append(feature)
label_ind += 1
train_data = np.array([np.array(xi) for xi in train_data])
print("train data shape before gen pair")
print(train_data.shape)
siamese_data_loader = Siamese_Loader(train_data, test)
pairs, targets = siamese_data_loader.get_batch(SIAMESE_PAIR_SIZE)
return pairs, targets, siamese_data_loader
def gen_siamese_features_meta(model, Xsupport_label, Xsupport, Xtest):
"""
"""
siamese_pair = gen_test_on_support_data(Xsupport, Xtest)
global graph
with graph.as_default():
preds = model.predict(siamese_pair, batch_size=BATCH_SIZE, verbose=2)
preds = np.insert(preds, 1, Xsupport_label, axis = 1)
preds = pd.DataFrame(preds, columns = ['sim', 'class'])
siamese_features = preds.groupby('class', sort = False) \
.agg({'sim': ['max', 'min', 'median', 'mean', 'std']})
max_class = siamese_features['sim']['max'].idxmax()
siamese_features = np.insert(siamese_features.values.flatten(), 0, max_class, axis = 0)
return siamese_features
def gen_siamese_features(siamese_model, Xtest, Xsupport, Xsupport_label):
"""
"""
if MAX_WORKERS <= 0:
print("MAX_WORKERS should >= 1", file=sys.stderr)
exit(1)
siamese_features_array = list(range(len(Xtest)))
test_begin = 0
while test_begin < len(Xtest):
test_end = min(test_begin + MAX_WORKERS, len(Xtest))
with concurrent.futures.ThreadPoolExecutor(max_workers = MAX_WORKERS) as executor:
future_predict = {executor.submit(gen_siamese_features_meta, siamese_model,
Xsupport_label,
Xsupport,
Xtest[ind]): ind for ind in range(test_begin, test_end)}
for future in concurrent.futures.as_completed(future_predict):
ind = future_predict[future]
try:
siamese_features = future.result()
siamese_features_array[ind] = siamese_features
except Exception as exc:
print('%dth feature generated an exception: %s' % (ind, exc))
test_begin = test_end
if test_begin % 100 == 0:
print('Gen %d siamsese features' % test_begin)
if test_begin != len(Xtest):
print("Only gen %d siamese features" % test_begin, file=sys.stderr)
exit(1)
siamese_features_array = np.array(siamese_features_array)
pd.DataFrame(siamese_features_array[:, 0]).astype(np.int8).to_csv('pred_label', index = False)
return siamese_features_array
if __name__ == "__main__":
model_k = keras_train(train, y, 10)
#np.save("siamese_features_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_array)
# gen_sub(model_k, 'k', th, F1)
# ind = np.array([i * 5 for i in range(9)])
# xgbTrain(siamese_features_array[:, ind], lgbm_train_label);
#lgbm_features = siamese_features_array #np.concatenate((lgbm_train_data, siamese_features_array),
# model_l = lgbm_train(train, y, 10) #lgbm_features, lgbm_train_label, 10)#model_k)
# siamese_features_test_array = siamese_test(model_k[0][0], test)
#np.save("siamese_features_test_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_test_array)
##model_x = xgbTrain(5)#model_k)
#gen_sub(model_l, siamese_features_test_array) #model_k)
| apache-2.0 |
lhirschfeld/JargonBot | custombot.py | 1 | 3061 | import pickle
import praw
import random
from textblob import TextBlob
from datetime import datetime
from sklearn import linear_model
class RedditBot:
"""A class that performs basic operations, working with Reddit's
PRAW API."""
def __init__(self, botName):
# Setup the bot and primary variables.
self.r = praw.Reddit(botName)
self.responses = []
with open('ids.pickle', 'rb') as handle:
try:
self.ids = pickle.load(handle)
except EOFError:
self.ids = []
with open('models.pickle', 'rb') as handle:
try:
self.models = pickle.load(handle)
except EOFError:
self.models = {}
def updateIds(self):
# Save the new ids of comments that have been responded to.
with open('ids.pickle', 'wb') as handle:
pickle.dump(self.ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
def createModel(self, sub, init_fit):
new_model = linear_model.LinearRegression()
new_model.fit(init_fit[0], init_fit[1])
# TODO: Create sub class that stores this data.
self.models[sub] = (new_model, 1, init_fit[0], init_fit[1])
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
def updateModels(self, modelParams):
# Model params is a list of strings which contains the keys in
# each result which should be used to update the model.
        # Models is a dictionary with a tuple at each key containing:
# (linear regression, randomness rate, x fits, y fits)
currentTime = datetime.now()
        # Partition responses: ones older than an hour get scored below,
        # newer ones are kept for the next pass.
        oldResponses = [r for r in self.responses
                        if (currentTime - r["time"]).total_seconds() > 3600]
        self.responses = [r for r in self.responses
                          if (currentTime - r["time"]).total_seconds() < 3600]
for r in oldResponses:
result = 0
url = "https://reddit.com/" + r["sID"] + "?comment=" + r["cID"]
submission = self.r.get_submission(url=url)
comment_queue = submission.comments[:]
if comment_queue:
com = comment_queue.pop(0)
result += com.score
comment_queue.extend(com.replies)
while comment_queue:
com = comment_queue.pop(0)
                    text = TextBlob(com.body)
result += text.sentiment.polarity * com.score
x = []
for key in modelParams:
x.append(r[key])
            # Get old fits (stored as lists); build new lists since append() returns None
            x_fits = self.models[r["sub"]][2] + [x]
            y_fits = self.models[r["sub"]][3] + [result]
            self.models[r["sub"]][0].fit(x_fits, y_fits)
            # Update odds of random choice and store the grown fits back;
            # the entry is a tuple, so rebuild it instead of mutating in place
            reg = self.models[r["sub"]][0]
            self.models[r["sub"]] = (reg, self.models[r["sub"]][1] * 0.96, x_fits, y_fits)
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
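# A hedged usage sketch (assumes a praw.ini site named "jargonbot", existing
# pickle files, and that each recorded response carries the listed keys;
# all names here are illustrative only):
#
#   bot = RedditBot("jargonbot")
#   bot.updateModels(["word_count", "readability"])  # hypothetical feature keys
#   bot.updateIds()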
| mit |
molly24Huang/Cents_trip | Recommendation/attr_food_distance.py | 1 | 2978 | import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db
def cal_dist(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
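    # Haversine formula: a is the squared half-chord length between the two
    # points; 2*asin(sqrt(a)) converts it to the central angle, which is then
    # scaled by the Earth's equatorial radius (6378.137 km).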
c = 2 * asin(sqrt(a))
distance = 6378.137 * c
return distance
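# Hedged sanity check for cal_dist (coordinates are illustrative, not taken
# from the dataset): Singapore (lon 103.8198, lat 1.3521) to Kuala Lumpur
# (lon 101.6869, lat 3.1390) should come out to roughly 310 km:
#   print(cal_dist(103.8198, 1.3521, 101.6869, 3.1390))  # ~310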
food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'
food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)
food_data = food_df.iloc[:,[0,6,7]]
tourism_attractions_data = tourism_attractions_df.iloc[:,[0,2,3]]
foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()
attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()
distances = []
# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
# PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
# PWD=X_c03EeYTe#u;", "", "")
for i in range(len(tourism_attractions_data)):
for k in range(len(food_data)):
distance = cal_dist(lng_attractions[i], lat_attractions[i], lng_food[k], lat_food[k])
# print(distance)
distances.append(distance)
output = open('rating.txt','w')
k = 1
for i in range(len(tourism_attractions_data)):
for j in range(len(food_data)):
this_attractid = str(attractionid[i])
this_foodid = str(foodid[j])
        this_distance = str(distances[i * len(food_data) + j])
output.write(this_attractid)
output.write('\t')
output.write(this_foodid)
output.write('\t')
output.write(this_distance)
output.write('\n')
output.close()
#print(len(distances))
# k = 1
# for i in range(len(tourism_attractions_data)):
# for j in range(len(food_data)):
# this_attractid = attractionid[i]
# this_foodid = foodid[j]
# this_distance = distances[(i + 1)* j]
# sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
# attractionID=this_attractid, foodID=this_foodid, distance=this_distance
# )
# print(sql, '>>')
# try:
# stmt = ibm_db.exec_immediate(conn, sql)
# except Exception as e:
# print(e)
# print("Inserting couldn't be completed.")
# ibm_db.rollback(conn)
# else:
# ibm_db.commit(conn)
# print("Inserting complete.")
# print('-----' + str(k) + '-----')
# k += 1
# #
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/series/methods/test_rename_axis.py | 4 | 1503 | import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesRenameAxis:
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
ser = Series(list(range(len(mi))), index=mi)
result = ser.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = ser.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = ser.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
ser.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
def test_rename_axis_none(self, kwargs):
# GH 25034
index = Index(list("abc"), name="foo")
ser = Series([1, 2, 3], index=index)
result = ser.rename_axis(**kwargs)
expected_index = index.rename(None) if kwargs else index
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
sanketloke/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
thypad/brew | skensemble/generation/bagging.py | 3 | 2140 | import numpy as np
from sklearn.ensemble import BaggingClassifier
from brew.base import Ensemble
from brew.combination.combiner import Combiner
import sklearn
from .base import PoolGenerator
class Bagging(PoolGenerator):
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
self.ensemble = None
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.ensemble = Ensemble()
for _ in range(self.n_classifiers):
# bootstrap
idx = np.random.choice(X.shape[0], X.shape[0], replace=True)
data, target = X[idx, :], y[idx]
classifier = sklearn.base.clone(self.base_classifier)
classifier.fit(data, target)
self.ensemble.add(classifier)
return
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
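# A hedged usage sketch for Bagging (synthetic data; the base classifier is an
# illustrative choice, not mandated by this module):
#
#   from sklearn.tree import DecisionTreeClassifier
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=200, random_state=0)
#   bag = Bagging(base_classifier=DecisionTreeClassifier(), n_classifiers=25)
#   bag.fit(X, y)
#   print(bag.predict(X)[:10])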
class BaggingSK(PoolGenerator):
""""
This class should not be used, use brew.generation.bagging.Bagging instead.
"""
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
# using the sklearn implementation of bagging for now
self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
n_estimators=n_classifiers,
max_samples=1.0,
max_features=1.0)
self.ensemble = Ensemble()
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.sk_bagging.fit(X, y)
self.ensemble.add_classifiers(self.sk_bagging.estimators_)
# self.classes_ = set(y)
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
| mit |
MJuddBooth/pandas | pandas/tests/reshape/test_reshape.py | 1 | 25248 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
import numpy as np
from numpy import nan
import pytest
from pandas.compat import u
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
from pandas.core.sparse.api import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, {}]'.format(
self.effective_dtype(dtype).name,
fill_value
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.DataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
        # Test the case where the categorical variable has only one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
class TestCategoricalReshape(object):
def test_reshaping_multi_index_categorical(self):
# construct a MultiIndexed DataFrame formerly created
# via `tm.makePanel().to_frame()`
cols = ['ItemA', 'ItemB', 'ItemC']
data = {c: tm.makeTimeDataFrame() for c in cols}
df = pd.concat({c: data[c].stack() for c in data}, axis='columns')
df.index.names = ['major', 'minor']
df['str'] = 'foo'
dti = df.index.levels[0]
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(dti))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=dti)
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
codes=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
pnedunuri/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
imanolarrieta/RL | rlpy/Domains/HelicopterHover.py | 4 | 16981 | """Helicopter hovering task."""
from .Domain import Domain
import numpy as np
import rlpy.Tools.transformations as trans
from rlpy.Tools.GeneralTools import cartesian
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Circle, Ellipse
from mpl_toolkits.mplot3d import proj3d
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann <cdann@cdann.de>"
class Arrow3D(FancyArrowPatch):
"""
Helper class for plotting arrows in 3d
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
class HelicopterHoverExtended(Domain):
"""
Implementation of a simulator that models one of the Stanford
autonomous helicopters (an XCell Tempest helicopter) in the flight
regime close to hover.
Adapted from the
`RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_
**STATE:**
The state of the helicopter is described by a 20-dimensional vector
with the following entries:
* 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward
* 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right
* 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down
* 3: u [forward velocity]
* 4: v [sideways velocity (to the right)]
* 5: w [downward velocity]
* 6: p [angular rate around helicopter's x axis]
* 7: q [angular rate around helicopter's y axis]
* 8: r [angular rate around helicopter's z axis]
    * 9-12: orientation of heli in world as quaternion
* 13-18: current noise due to gusts (usually not observable!)
* 19: t number of timesteps in current episode
**REFERENCE:**
.. seealso::
Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics,
with application to modeling helicopters.
Advances in Neural Information Systems (2006).
"""
MAX_POS = 20. #: [m] maximum deviation in position in each dimension
MAX_VEL = 10. #: [m/s] maximum velocity in each dimension
MAX_ANG_RATE = 4 * np.pi # : maximum angular velocity
MAX_ANG = 1.
    WIND_MAX = 5.  # : maximum gust intensity
MIN_QW_BEFORE_HITTING_TERMINAL_STATE = np.cos(30. / 2. * np.pi / 180.)
wind = np.array([.0, .0, 0.]) #: wind in neutral orientation
discount_factor = 0.95 #: discount factor
gust_memory = 0.8
domain_fig = None
episodeCap = 6000
# model specific parameters from the learned model
noise_std = np.array([0.1941, 0.2975, 0.6058, 0.1508, 0.2492, 0.0734])
drag_vel_body = np.array([.18, .43, .49])
drag_ang_rate = np.array([12.78, 10.12, 8.16])
u_coeffs = np.array([33.04, -33.32, 70.54, -42.15])
tail_rotor_side_thrust = -0.54
dt = 0.01 #: length of one timestep
continuous_dims = np.arange(20)
statespace_limits_full = np.array([[-MAX_POS, MAX_POS]] * 3
+ [[-MAX_VEL, MAX_VEL]] * 3
+ [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3
+ [[-MAX_ANG, MAX_ANG]] * 4
+ [[-2., 2.]] * 6
+ [[0, episodeCap]])
statespace_limits = statespace_limits_full
# create all combinations of possible actions
_action_bounds = np.array([[-2., 2.]] * 4)
# maximum action: 2
_actions_dim = np.array(
[[-.2, -0.05, 0.05, 0.2]] * 3 + [[0., 0.15, 0.3, 0.5]])
actions = cartesian(list(_actions_dim)) #: all possible actions
actions_num = np.prod(actions.shape[0])
def __init__(self, noise_level=1., discount_factor=0.95):
self.noise_level = noise_level
self.discount_factor = discount_factor
super(HelicopterHoverExtended, self).__init__()
def s0(self):
self.state = np.zeros((20))
self.state[9] = 1.
return self.state.copy(), self.isTerminal(), self.possibleActions()
def isTerminal(self):
s = self.state
if np.any(self.statespace_limits_full[:9, 0] > s[:9]) or np.any(self.statespace_limits_full[:9, 1] < s[:9]):
return True
if len(s) <= 12:
w = np.sqrt(1. - np.sum(s[9:12] ** 2))
else:
w = s[9]
return np.abs(w) < self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE
def _get_reward(self):
s = self.state
if self.isTerminal():
r = -np.sum(self.statespace_limits[:9, 1] ** 2)
#r -= np.sum(self.statespace_limits[10:12, 1] ** 2)
r -= (1. - self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE ** 2)
return r * (self.episodeCap - s[-1])
else:
return -np.sum(s[:9] ** 2) - np.sum(s[10:12] ** 2)
def possibleActions(self, s=None):
return np.arange(self.actions_num)
def step(self, a):
a = self.actions[a]
# make sure the actions are not beyond their limits
a = np.maximum(self._action_bounds[:, 0], np.minimum(a,
self._action_bounds[:, 1]))
pos, vel, ang_rate, ori_bases, q = self._state_in_world(self.state)
t = self.state[-1]
gust_noise = self.state[13:19]
gust_noise = (self.gust_memory * gust_noise
+ (1. - self.gust_memory) * self.random_state.randn(6) * self.noise_level * self.noise_std)
# update noise which simulates gusts
for i in range(10):
# Euler integration
# position
pos += self.dt * vel
# compute acceleration on the helicopter
vel_body = self._in_world_coord(vel, q)
wind_body = self._in_world_coord(self.wind, q)
wind_body[-1] = 0. # the java implementation
# has it this way
acc_body = -self.drag_vel_body * (vel_body + wind_body)
acc_body[-1] += self.u_coeffs[-1] * a[-1]
acc_body[1] += self.tail_rotor_side_thrust
acc_body += gust_noise[:3]
acc = self._in_body_coord(acc_body, q)
acc[-1] += 9.81 # gravity
# velocity
vel += self.dt * acc
# orientation
tmp = self.dt * ang_rate
qdt = trans.quaternion_about_axis(np.linalg.norm(tmp), tmp)
q = trans.quaternion_multiply(q, qdt)
#assert np.allclose(1., np.sum(q**2))
# angular accelerations
ang_acc = -ang_rate * self.drag_ang_rate + \
self.u_coeffs[:3] * a[:3]
ang_acc += gust_noise[3:]
ang_rate += self.dt * ang_acc
st = np.zeros_like(self.state)
st[:3] = -self._in_body_coord(pos, q)
st[3:6] = self._in_body_coord(vel, q)
st[6:9] = ang_rate
st[9:13] = q
st[13:19] = gust_noise
st[-1] = t + 1
self.state = st.copy()
return (
self._get_reward(), st, self.isTerminal(), self.possibleActions()
)
def _state_in_world(self, s):
"""
        transforms the state from body coordinates to world coordinates
.. warning::
angular rate still in body frame!
"""
pos_body = s[:3]
vel_body = s[3:6]
ang_rate = s[6:9].copy()
q = s[9:13].copy()
pos = self._in_world_coord(-pos_body, q)
vel = self._in_world_coord(vel_body, q)
rot = trans.quaternion_matrix(trans.quaternion_conjugate(q))[:3, :3]
return pos, vel, ang_rate, rot, q
def _in_body_coord(self, p, q):
"""
q is the inverse quaternion of the rotation of the helicopter in world coordinates
"""
q_pos = np.zeros((4))
q_pos[1:] = p
q_p = trans.quaternion_multiply(trans.quaternion_multiply(q, q_pos),
trans.quaternion_conjugate(q))
return q_p[1:]
def _in_world_coord(self, p, q):
"""
q is the inverse quaternion of the rotation of the helicopter in world coordinates
"""
return self._in_body_coord(p, trans.quaternion_conjugate(q))
def showDomain(self, a=None):
s = self.state
if a is not None:
a = self.actions[a].copy() * 3 # amplify for visualization
pos, vel, ang_rate, ori_bases, _ = self._state_in_world(s)
coords = np.zeros((3, 3, 2)) + pos[None, :, None]
coords[:, :, 1] += ori_bases * 4
u, v = np.mgrid[0:2 * np.pi:10j, 0:2:1.]
# rotor coordinates
coord = np.zeros([3] + list(u.shape))
coord[0] = .1 * np.sin(u) * v
coord[1] = 0.
coord[2] = .1 * np.cos(u) * v
coord[0] -= 0.8
coord_side = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord)
coord_side += pos[:, None, None]
coord = np.zeros([3] + list(u.shape))
coord[0] = .6 * np.cos(u) * v
coord[1] = .6 * np.sin(u) * v
coord[2] = -.4
coord_main = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord)
coord_main += pos[:, None, None]
style = dict(fc="r", ec="r", lw=2., head_width=0.05, head_length=0.1)
if self.domain_fig is None:
self.domain_fig = plt.figure(figsize=(12, 8))
# action axes
ax1 = plt.subplot2grid((1, 3), (0, 0), frameon=False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
lim = 2 # self.MAX_POS
ax1.set_xlim(-lim, lim)
ax1.set_ylim(-lim, lim)
if a is None:
a = np.zeros((4))
# main rotor
ax1.add_artist(Circle(np.zeros((2)), radius=0.6))
ax1.add_artist(Ellipse(np.array([0, 1.5]), height=0.3, width=0.02))
# TODO make sure the actions are plotted right
# main rotor direction?
arr1 = ax1.arrow(0, 0, a[0], 0, **style)
arr2 = ax1.arrow(0, 0, 0, a[1], **style)
# side rotor throttle?
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style)
# main rotor throttle
arr4 = ax1.arrow(1.5, 0, 0, a[3], **style)
ax1.set_aspect("equal")
self.action_arrows = (arr1, arr2, arr3, arr4)
self.action_ax = ax1
#ax = self.domain_fig.gca(projection='3d')
ax = plt.subplot2grid((1, 3), (0, 1), colspan=2, projection='3d')
ax.view_init(elev=np.pi)
# print origin
x = Arrow3D([0, 2], [0, 0], [0, 0], mutation_scale=30, lw=1,
arrowstyle="-|>", color="r")
y = Arrow3D([0, 0], [0, 2], [0, 0], mutation_scale=30, lw=1,
arrowstyle="-|>", color="b")
z = Arrow3D([0, 0], [0, 0], [0, 2], mutation_scale=30, lw=1,
arrowstyle="-|>", color="g")
ax.add_artist(x)
ax.add_artist(y)
ax.add_artist(z)
# print helicopter coordinate axes
x = Arrow3D(*coords[0], mutation_scale=30, lw=2, arrowstyle="-|>",
color="r")
y = Arrow3D(*coords[1], mutation_scale=30, lw=2, arrowstyle="-|>",
color="b")
z = Arrow3D(*coords[2], mutation_scale=30, lw=2, arrowstyle="-|>",
color="g")
ax.add_artist(x)
ax.add_artist(y)
ax.add_artist(z)
self.heli_arrows = (x, y, z)
self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1],
coord_main[2], color="k")
self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1],
coord_side[2], color="k")
self._ax = ax
ax.set_aspect("equal")
lim = 5 # self.MAX_POS
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_zlim(-lim, lim)
ax.view_init(elev=-135)
plt.show()
else:
self.heli_arrows[0]._verts3d = tuple(coords[0])
self.heli_arrows[1]._verts3d = tuple(coords[1])
self.heli_arrows[2]._verts3d = tuple(coords[2])
ax = self._ax
ax.collections.remove(self._wframe_main)
ax.collections.remove(self._wframe_side)
for arr in self.action_arrows:
self.action_ax.artists.remove(arr)
ax1 = self.action_ax
# TODO make sure the actions are plotted right
# main rotor direction?
arr1 = ax1.arrow(0, 0, a[0], 0, **style)
arr2 = ax1.arrow(0, 0, 0, a[1], **style)
# side rotor throttle?
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style)
# main rotor throttle
arr4 = ax1.arrow(1.5, 0, 0, a[3], **style)
self.action_arrows = (arr1, arr2, arr3, arr4)
self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1],
coord_main[2], color="k")
self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1],
coord_side[2], color="k")
ax.set_aspect("equal")
lim = 5 # self.MAX_POS
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_zlim(-lim, lim)
ax.view_init(elev=-135)
self.domain_fig.canvas.draw()
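# Illustrative sketch, new to this file and never called by the domain code: the frame
# changes in _in_body_coord / _in_world_coord are quaternion sandwich products
# q * (0, p) * conj(q). Because the two helpers use a quaternion and its conjugate,
# mapping a vector into the body frame and back must recover the original vector.
# The 60-degree quaternion below is an arbitrary example chosen for the check.
def _quaternion_round_trip_sketch():
    q = trans.quaternion_about_axis(np.pi / 3., [0., 1., 0.])
    def sandwich(p, quat):
        qp = np.zeros(4)
        qp[1:] = p
        return trans.quaternion_multiply(
            trans.quaternion_multiply(quat, qp),
            trans.quaternion_conjugate(quat))[1:]
    p = np.array([1., 2., 3.])
    p_body = sandwich(p, q)  # world -> body, mirrors _in_body_coord
    p_world = sandwich(p_body, trans.quaternion_conjugate(q))  # mirrors _in_world_coord
    return np.allclose(p, p_world)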
class HelicopterHover(HelicopterHoverExtended):
"""
.. warning::
This domain has an internal hidden state, as it actually is
a POMDP. Besides the 12-dimensional observable state, there is an internal
state saved as ``self.hidden_state_`` (time and long-term noise which
        simulates gusts of wind).
        Be aware of this state if you use this class to produce samples which are
        not in order.
Implementation of a simulator that models one of the Stanford
autonomous helicopters (an XCell Tempest helicopter) in the flight
regime close to hover.
Adapted from the
`RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_
**STATE:**
The state of the helicopter is described by a 12-dimensional vector
with the following entries:
* 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward
* 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right
* 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down
* 3: u [forward velocity]
* 4: v [sideways velocity (to the right)]
* 5: w [downward velocity]
* 6: p [angular rate around helicopter's x axis]
* 7: q [angular rate around helicopter's y axis]
* 8: r [angular rate around helicopter's z axis]
    * 9-11: orientation of the world in the heli system as quaternion
**REFERENCE:**
.. seealso::
Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics,
with application to modeling helicopters.
Advances in Neural Information Systems (2006).
"""
episodeCap = 6000
MAX_POS = 20. # m
MAX_VEL = 10. # m/s
MAX_ANG_RATE = 4 * np.pi
MAX_ANG = 1.
WIND_MAX = 5.
continuous_dims = np.arange(12)
statespace_limits = np.array([[-MAX_POS, MAX_POS]] * 3
+ [[-MAX_VEL, MAX_VEL]] * 3
+ [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3
+ [[-MAX_ANG, MAX_ANG]] * 3)
#full_state_ = np.zeros((20))
def s0(self):
#self.hidden_state_ = np.zeros((8))
#self.hidden_state_[0] = 1.
s_full, term, p_actions = super(HelicopterHover, self).s0()
s, _ = self._split_state(s_full)
return s, term, p_actions
def _split_state(self, s):
s_observable = np.zeros((12))
s_observable[:9] = s[:9]
s_observable[9:12] = s[10:13]
s_hidden = np.zeros((8))
s_hidden[0] = s[9]
s_hidden[1:] = s[13:]
return s_observable, s_hidden
def step(self, a):
#s_extended = self._augment_state(s)
r, st, term, p_actions = super(HelicopterHover, self).step(a)
st, _ = self._split_state(st)
return (r, st, term, p_actions)
| bsd-3-clause |
kbrose/article-tagging | lib/tagnews/utils/load_data.py | 1 | 18109 | import pandas as pd
import numpy as np
import re
import json
import os
import warnings
import shutil
from pathlib import Path
import codecs
"""
Helper functions to load the article data. The main method to use
is load_data().
"""
# Caution! Modifying this in code will have no effect since the
# default arguments are populated with this reference at creation
# time, so post-hoc modifications will do nothing.
__data_folder = os.path.join(os.path.split(__file__)[0], '..', 'data')
def clean_string(s):
"""
Clean all the HTML/Unicode nastiness out of a string.
Replaces newlines with spaces.
"""
return s.replace('\r', '').replace('\n', ' ').replace('\xa0', ' ').strip()
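# Hypothetical example of the cleanup performed by clean_string(): carriage returns
# are dropped, newlines and non-breaking spaces become plain spaces, and surrounding
# whitespace is stripped. The sample text is made up.
def _clean_string_example():
    # returns 'Shooting reported near downtown'
    return clean_string('Shooting\xa0reported\r\nnear downtown ')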
def load_articles(data_folder=__data_folder, nrows=None):
"""
Loads the articles CSV. Can optionally only load the first
`nrows` number of rows.
"""
column_names = ['id',
'feedname',
'url',
'orig_html',
'title',
'bodytext',
'relevant',
'created',
'last_modified',
'news_source_id',
'author']
return pd.read_csv(os.path.join(data_folder,
'newsarticles_article.csv'),
header=None,
names=column_names,
nrows=nrows,
dtype={'orig_html': str, 'author': str})
def load_taggings(data_folder=__data_folder):
"""Loads the type-of-crime human tagging of the articles."""
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc.set_index('id', drop=True, inplace=True)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
uc_tags = pd.read_csv(
os.path.join(data_folder, 'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names
)
uc_tags.set_index('usercoding_id', drop=True, inplace=True)
uc_tags['article_id'] = uc.loc[uc_tags.index, 'article_id']
return uc_tags
def load_model_categories(data_folder=__data_folder):
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tcr = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcategoryrelevance.csv'),
names=tcr_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tcr['article_id'] = tc.loc[tcr['coding_id']]['article_id'].values
return tcr
def load_model_locations(data_folder=__data_folder):
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tl = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedlocation.csv'),
names=tl_names
)
tc = pd.read_csv(
os.path.join(data_folder, 'newsarticles_trainedcoding.csv'),
names=tc_names
).set_index('id', drop=True)
tl['article_id'] = tc.loc[tl['coding_id']]['article_id'].values
return tl
def load_locations(data_folder=__data_folder):
"""Load the human-extracted locations from the articles."""
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc = pd.read_csv(os.path.join(data_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
uc['locations'] = uc['locations'].apply(lambda x: json.loads(x))
return uc
def load_categories(data_folder=__data_folder):
"""Loads the mapping of id to names/abbrevations of categories"""
column_names = ['id', 'category_name', 'abbreviation', 'created',
'active', 'kind']
return pd.read_csv(os.path.join(data_folder, 'newsarticles_category.csv'),
header=None,
names=column_names)
def load_data(data_folder=__data_folder, nrows=None):
"""
    Creates a dataframe of the article information and k-hot encodes the tags
    into one 0/1 column per category, named by the category abbreviation (model
    predictions get a parallel column with a ``_model`` suffix). The k-hot
    encoding is done assuming that the categories are 1-indexed and that there
    are as many categories as the maximum value of the numerical category_id
    column.
Inputs:
data_folder:
A folder containing the data files in CSV format.
nrows:
Number of articles to load. Defaults to all, which uses about 4
GB of memory.
"""
df = load_articles(data_folder=data_folder, nrows=nrows)
df['relevant'] = df['relevant'] == 't'
df.rename(columns={'id': 'article_id'}, inplace=True)
df.set_index('article_id', drop=True, inplace=True)
# hopefully this will save some memory/space, can add back if needed
del(df['orig_html'])
tags_df = load_taggings(data_folder)
# will help cacheing
tags_df.sort_values(by='article_id', inplace=True)
tags_df = tags_df.loc[tags_df['article_id'].isin(
df.index.intersection(tags_df['article_id']))]
locs_df = load_locations(data_folder)
locs_df.sort_values(by='article_id', inplace=True)
locs_df = locs_df.loc[locs_df['article_id'].isin(
df.index.intersection(locs_df['article_id']))]
model_tags_df = load_model_categories(data_folder)
# will help cacheing
model_tags_df.sort_values(by='article_id', inplace=True)
model_tags_df = model_tags_df.loc[model_tags_df['article_id'].isin(
df.index.intersection(model_tags_df['article_id']))]
# init with empty lists
df['locations'] = np.empty([df.shape[0], 0]).tolist()
loc_article_ids = locs_df['article_id'].values
df.loc[loc_article_ids, 'locations'] = locs_df['locations'].values
def find_loc_in_string(locs, string):
"""
        The locations are generated from JavaScript, which means there are
        going to be some problems getting things to line up exactly and
        neatly. This function will hopefully perform all the necessary
transformations to find the given location text within the
larger string.
Inputs:
locs: list of locations as loaded by load_locations
string: bodytext of article in which to find locs
Returns:
updated_locs: list of locations as loaded by
load_locations, but with a couple
extra fields ('cleaned text' and 'cleaned span').
"""
for i, loc in enumerate(locs):
loc_txt = loc['text']
loc_txt = clean_string(loc_txt)
string = clean_string(string)
loc['cleaned text'] = loc_txt
spans = [x.span() for x in re.finditer(re.escape(loc_txt), string)]
if spans:
# The string may have occurred multiple times, and since the
# spans don't line up perfectly we can't know which one is the
# "correct" one. Best we can do is find the python span closest
# to the expected javascript span.
closest = np.argmin(np.abs(
np.array([x[0] for x in spans]) - loc['start']
))
loc['cleaned span'] = spans[closest]
locs[i] = loc
return locs
df['locations'] = df.apply(
lambda r: find_loc_in_string(r['locations'], r['bodytext']),
axis=1
)
num_no_match = df['locations'].apply(
lambda locs: any([('cleaned span' not in loc) for loc in locs])
).sum()
if num_no_match:
warnings.warn(('{} location strings were not found in'
' the bodytext.').format(num_no_match),
RuntimeWarning)
model_locations_df = load_model_locations(data_folder)
model_locations_df = model_locations_df.set_index('article_id')
model_locations_gb = model_locations_df.groupby('article_id')
model_locations_text = model_locations_gb['text'].apply(list)
df['model_location_text'] = model_locations_text
categories_df = load_categories(data_folder)
categories_df.set_index('id', drop=True, inplace=True)
# tags_df['category_id'] = tags_df['category_id'].astype(str)
tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[tags_df['category_id']]
.values)
model_tags_df['category_abbreviation'] = (categories_df
['abbreviation']
[model_tags_df['category_id']]
.values)
if np.setdiff1d(tags_df['article_id'].values, df.index.values).size:
warnings.warn('Tags were found for article IDs that do not exist.',
RuntimeWarning)
def update_df_with_categories(article_ids, cat_abbreviations, vals,
is_model):
# for some reason, some articles that are tagged don't show up
# in the articles CSV. filter those out.
existing_ids_filter = np.isin(article_ids, df.index.values)
article_ids = article_ids[existing_ids_filter]
cat_abbreviations = cat_abbreviations[existing_ids_filter]
vals = vals[existing_ids_filter]
for i in range(categories_df.shape[0]):
cat_name = categories_df.loc[i+1, 'abbreviation']
if is_model:
cat_name += '_model'
df[cat_name] = 0
if not is_model:
df[cat_name] = df[cat_name].astype('int8')
matches = cat_abbreviations == cat_name
if not matches.sum():
continue
df.loc[article_ids[matches], cat_name] = vals[matches]
update_df_with_categories(
model_tags_df['article_id'].values,
model_tags_df['category_abbreviation'].values + '_model',
model_tags_df['relevance'].values,
is_model=True
)
update_df_with_categories(
tags_df['article_id'].values,
tags_df['category_abbreviation'].values,
np.ones((tags_df['article_id'].values.shape), dtype='int8'),
is_model=False
)
df.loc[df['bodytext'].isnull(), 'bodytext'] = ''
return df
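# Illustrative sketch with made-up data (not used by the loaders above): load_data()
# gives every article one 0/1 column per category, named by the category
# abbreviation, plus a parallel '<abbreviation>_model' column holding the trained
# model's relevance value. The abbreviations 'HOMI' and 'GUNV' below are hypothetical.
def _khot_layout_sketch():
    toy = pd.DataFrame({'bodytext': ['article one', 'article two']},
                       index=pd.Index([101, 102], name='article_id'))
    for abbrev in ['HOMI', 'GUNV']:
        toy[abbrev] = np.zeros(2, dtype='int8')  # human tags are 0/1 ints
        toy[abbrev + '_model'] = 0.0             # model columns hold relevance scores
    toy.loc[101, 'HOMI'] = 1                     # article 101 carries one tag
    toy.loc[102, ['HOMI', 'GUNV']] = 1           # article 102 carries both tags
    return toy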
def subsample_and_resave(out_folder, n=5, input_folder=__data_folder,
random_seed=5):
"""
Subsamples the CSV data files so that we have at least
`n` articles from each type-of-crime tag as determined
by the human coding. Saves the subsampled CSV data
into `out_folder`. If there are fewer than `n` articles
tagged with a type-of-crime, then we will use all of
the articles with that tag.
Inputs
------
out_folder : str
Path to folder where data should be saved. Should already exist.
n : int
How many examples from each category should we have?
input_folder : str
Path to where the full CSV files are saved.
random_seed : None or int
np.random.RandomState() will be seeded with this value
in order to perform the random subsampling.
"""
out_folder = str(Path(out_folder).expanduser().absolute())
input_folder = str(Path(input_folder).expanduser().absolute())
if out_folder == input_folder:
raise RuntimeError('out_folder cannot match input_folder.')
random_state = np.random.RandomState(random_seed)
df = load_data(input_folder)
chosen_indexes = []
for crime_type in df.loc[:, 'OEMC':].columns:
is_type = df[crime_type].astype(bool)
n_samps = min(n, is_type.sum())
chosen_indexes += (df.loc[is_type, :]
.sample(n_samps, random_state=random_state)
.index
.tolist())
del df
chosen_indexes = sorted(list(set(chosen_indexes)))
# newsarticles_article.csv
articles_df = load_articles(input_folder)
sample = (articles_df
.reset_index()
.set_index('id')
.loc[chosen_indexes, 'index'])
articles_df = articles_df.loc[sample, :]
    # obfuscate the article body text with ROT-13 so the subsampled data is not stored in plain text
articles_df['bodytext'] = articles_df['bodytext'].astype(str).apply(
lambda x: codecs.encode(x, 'rot-13')
)
articles_df.to_csv(os.path.join(out_folder, 'newsarticles_article.csv'),
header=None, index=False)
del articles_df
# newsarticles_category.csv
shutil.copyfile(os.path.join(input_folder, 'newsarticles_category.csv'),
os.path.join(out_folder, 'newsarticles_category.csv'))
# newsarticles_usercoding.csv
uc_column_names = ['id', 'date', 'relevant',
'article_id', 'user_id', 'locations']
uc_df = pd.read_csv(os.path.join(input_folder,
'newsarticles_usercoding.csv'),
header=None,
names=uc_column_names)
sample = np.where(uc_df['article_id'].isin(chosen_indexes))[0]
uc_df.loc[sample, :].to_csv(
os.path.join(out_folder, 'newsarticles_usercoding.csv'),
header=None, index=False
)
uc_tags_column_names = ['id', 'usercoding_id', 'category_id']
# newsarticles_usercoding_categories.csv
uc_tags_df = pd.read_csv(
os.path.join(input_folder,
'newsarticles_usercoding_categories.csv'),
header=None,
names=uc_tags_column_names,
dtype={'id': int, 'usercoding_id': int, 'category_id': int}
)
sample = np.where(uc_df
.set_index('id')
.loc[uc_tags_df['usercoding_id'], 'article_id']
.isin(chosen_indexes)
)[0]
uc_tags_df = uc_tags_df.loc[sample, :]
uc_tags_df.to_csv(
os.path.join(out_folder, 'newsarticles_usercoding_categories.csv'),
header=None, index=False
)
# newsarticles_trainedcoding
tc_names = ['id', 'date', 'model_info', 'relevance', 'article_id']
tc = pd.read_csv(
'tagnews/data/newsarticles_trainedcoding.csv',
names=tc_names
)
tc = tc.loc[tc['article_id'].isin(chosen_indexes)]
tc.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcoding.csv'),
header=False, index=False
)
# newsarticles_trainedcategoryrelevance
tcr_names = ['id', 'relevance', 'category_id', 'coding_id']
tcr = pd.read_csv(
'tagnews/data/newsarticles_trainedcategoryrelevance.csv',
names=tcr_names
)
tcr = tcr.loc[tcr['coding_id'].isin(tc['id'])]
tcr.to_csv(
os.path.join(out_folder, 'newsarticles_trainedcategoryrelevance.csv'),
header=False, index=False
)
# newsarticles_trainedlocation
tl_names = ['id', 'text', 'latitude', 'longitude', 'coding_id']
tl = pd.read_csv(
'tagnews/data/newsarticles_trainedlocation.csv',
names=tl_names
)
tl = tl.loc[tl['coding_id'].isin(tc['id'])]
tl.to_csv(
os.path.join(out_folder, 'newsarticles_trainedlocation.csv'),
header=False, index=False
)
def load_crime_data(data_folder=__data_folder):
crimes = pd.read_csv(os.path.join(data_folder, 'Crimes.csv'))
crimes = crimes[crimes['Year'] > 2010]
crime_string = pd.Series('', crimes.index)
# ['ID', 'Case Number', 'Date', 'Block', 'IUCR', 'Primary Type',
# 'Description', 'Location Description', 'Arrest', 'Domestic', 'Beat',
# 'District', 'Ward', 'Community Area', 'FBI Code', 'X Coordinate',
# 'Y Coordinate', 'Year', 'Updated On', 'Latitude', 'Longitude',
# 'Location']
# TODO: synonyms on this for month name, weekday name,
# time of day (e.g. afternoon), etc.
crime_string += crimes['Date'] + ' '
# TODO: synonyms?
crime_string += crimes['Primary Type'] + ' '
# TODO: synonyms?
crime_string += crimes['Description'] + ' '
# TODO: synonyms?
crime_string += crimes['Location Description'] + ' '
# TODO: synonyms?
iucr = pd.read_csv(os.path.join(data_folder, 'IUCR.csv'))
iucr.set_index('IUCR', drop=True, inplace=True)
idx = iucr.index
idx_values = idx.values
idx_values[idx.str.len() == 3] = '0' + idx_values[idx.str.len() == 3]
crime_string += (iucr.loc[crimes['IUCR'], 'PRIMARY DESCRIPTION']
.fillna('')
.values
+ ' ')
crime_string += (iucr.loc[crimes['IUCR'], 'SECONDARY DESCRIPTION']
.fillna('')
.values
+ ' ')
community_areas = pd.read_csv(os.path.join(data_folder, 'CommAreas.csv'))
community_areas.set_index('AREA_NUM_1', inplace=True, drop=True)
crime_string += (community_areas.loc[crimes['Community Area'], 'COMMUNITY']
.fillna('')
.values
+ ' ')
return crimes, crime_string
def load_ner_data(data_folder=__data_folder):
"""
Loads ner.csv from the specified data folder.
The column 'stag' is a binary value indicating whether or not
the row corresponds to the entity "geo". Typically, you will
want to use column 'word' to predict the column 'stag'.
"""
df = pd.read_csv(os.path.join(data_folder, 'ner.csv'),
encoding="ISO-8859-1",
error_bad_lines=False,
index_col=0)
df.dropna(subset=['word', 'tag'], inplace=True)
df.reset_index(inplace=True, drop=True)
df['stag'] = (df['tag'] == 'B-geo') | (df['tag'] == 'I-geo')
df['all_tags'] = df['tag']
df['tag'] = df['stag']
df = df[['word', 'all_tags', 'tag']]
return df
| mit |
arrow-/simQuad | ground_station/gyro_scope.py | 2 | 5471 | '''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
IMPORTANT!!
It is suggested you run this script with mpu_level2.ino first to see and understand
its operation.
Basically this script EXPECTS:
Arduino is providing space separated gyro readings @ ~5ms intervals (via MPU Interrupt).
* Each serial packet must be ASCII and look like:
[x_gyro]<space>[y_gyro]<space>[z_gyro]<newline>
+ You need to specify correct Serial port
+ You need to set the Y-limits of the plot axis.
+ You need to use correct value of "dt".
+ You need to set the correct conversion factor for Gyro readings.
        Mode     0       1       2       3
        Range    +-250   +-500   +-1000  +-2000
        Conv.    131     65.5    32.75   16.375
AND it DELIVERS:
* 3 axis loss-less Gyro readings plot (almost real time).
* 3D visualisation of current orientation based on gyro vals
If you want to just plot data in ~real time use {oscilloscope.py}.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import serial, time
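# Illustrative sketch only: the "[x]<space>[y]<space>[z]<newline>" ASCII packet format
# described in the header docstring could be decoded with a helper like the one below.
# It is not used here -- the acquisition loop further down reads 12 raw bytes per frame
# and unpacks them by hand -- and it assumes the packet carries raw gyro counts that
# still need the range-dependent conversion factor (65.5 is the +-500 deg/s value).
def _parse_ascii_packet(line, conversion=65.5):
    x_raw, y_raw, z_raw = line.split()
    return (float(x_raw) / conversion,
            float(y_raw) / conversion,
            float(z_raw) / conversion)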
def rotate(v, axis, theta):
'''
Rotates "v" vector about "axis" vector by "theta" radians, returns vector
'''
c = np.cos(theta)
s = np.sin(theta)
t = 1-c
mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s],
[axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s],
[axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ])
return mat.dot(v.T)
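# Hypothetical sanity check, not called anywhere in this script: rotate() builds the
# standard axis-angle (Rodrigues) rotation matrix, so turning the x axis by 90 degrees
# about the z axis should land on the y axis.
def _rotate_sanity_check():
    x_axis = np.array([1., 0., 0.])
    z_axis = np.array([0., 0., 1.])
    return np.allclose(rotate(x_axis, z_axis, np.pi / 2), [0., 1., 0.])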
def calcPose(omega):
'''
Helper function. Finds the "d"-theta, then calls rotate.
Omega must be in ** degrees per second **
'''
theta = omega*dt*np.pi/180 #theta is "d-theta" in radians
rpy[1] = rotate(rpy[1], rpy[0], theta[0])
rpy[0] = rotate(rpy[0], rpy[1], theta[1])
rpy[2] = np.cross(rpy[0], rpy[1])
rpy[1] = rotate(rpy[1], rpy[2], theta[2])
rpy[0] = rotate(rpy[0], rpy[2], theta[2])
plt.ion()
# SET CORRECT PORT NUM HERE
arduino = serial.Serial('/dev/ttyACM0', 57600)
# dt is found experimentally. Contact Ananya for details. Basically this is the time between
# 2 MPU(gyro) interrupts. The np.pi/180 converts deg/sec to rad/sec.
# SET CORRECT dt HERE. TIME IN SECONDS BETWEEN TWO SENSOR PACKETS AS RECVD. BY ARDUINO.
dt = .005 # 5msec
# rpy is original orientation. These vectors are updated by calcPose()
rpy = np.eye(3)
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(121)
a3d = fig.add_subplot(122, projection='3d')
a3d.set_xlim(-1.2,1.2)
a3d.set_ylim(-1.2,1.2)
a3d.set_zlim(-1.2,1.2)
a3d.scatter([0], [0], [0], s=40)
r, = a3d.plot([0,1], [0,0], [0,0], lw=2, c='black')
p, = a3d.plot([0,0], [0,1], [0,0], lw=2, c='red')
a3d.plot([0,2], [0,0], [0,0], c='cyan')
a3d.plot([0,0], [0,2], [0,0], c='brown')
a3d.plot([0,0], [0,0], [0,2], c='green')
a3d.plot([0,-2], [0,0], [0,0], ls='--', c='cyan')
a3d.plot([0,0], [0,-2], [0,0], ls='--', c='brown')
a3d.plot([0,0], [0,0], [0,-2], ls='--', c='green')
num_samples = 0
buff = 0
# "buff" counts up to 25 (the `if buff>25` check below). plt.draw() is only called when it gets there, since
# plt.draw() is a costly operation. Normal list append and pose calculations are fast.
# So, do those diligently, for every sample, but update display
# rarely (while ensuring smooth animation).
gyro_x = [0]
gyro_y = [0] # gyro data lists. I use them like queues.
gyro_z = [0]
t = [0]
# scopes is a list of 3 matplotlib Line2D objects.
scopes = [axes.plot(t, gyro_x, label=r'$\omega_x$')[0], axes.plot(t, gyro_y, label=r'$\omega_y$')[0], axes.plot(t, gyro_z, label=r'$\omega_z$')[0]]
axes.legend(prop=dict(size=14))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
axes.set_ylim(-505, 505) # SET CORRECT Y-LIM HERE
conversion = 65.5 #Gyro 500 SET CORRECT CONV FACTOR HERE
# Refer to the datasheet. Converts the raw ADC reading into a physical measurement (deg/s).
# If you don't understand this, pls. leave project.
print 'Me Ready'
time.sleep(2)
#Handshake MAY BE REDUNDANT
print arduino.inWaiting()
arduino.flushInput()
arduino.write('e')
print 'Sent Request...'
data = [0]*6
while True:
try:
num = arduino.read(12)
num = [ord(x) for x in num]
except:
print 'Serial error!'
raise RuntimeError
_ind=0 #this var is connected to for loop below!!
for i in range(0,12, 2):
data[_ind] = (num[i]<<8)|num[i+1]
if data[_ind] & 0x8000:
data[_ind] = data[_ind] - 0x10000
_ind += 1
#print data[3:]
datas = np.array([float(data[3])/conversion, float(data[4])/conversion, float(data[5])/conversion])
gyro_x.append(datas[0])
gyro_y.append(datas[1])
gyro_z.append(datas[2])
num_samples += 1
t.append(num_samples)
calcPose(datas) #This function updates the global variable: "rpy"
if num_samples>200:
del t[0]
del gyro_x[0]
del gyro_y[0]
del gyro_z[0]
axes.set_xlim(t[0], num_samples)
scopes[0].set_data(t, gyro_x)
scopes[1].set_data(t, gyro_y)
scopes[2].set_data(t, gyro_z)
# pose matrix is just an easier way of giving input to the .set_data()
# and .set_3d_properties() methods. You see, line needs 2 (end) points:
# the rpy entries AND THE ORIGIN. pose matrix does just that: specifies
# BOTH end points.
pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T])
r.set_data(pose[0][:2])
r.set_3d_properties(pose[0][2])
p.set_data(pose[1][:2])
p.set_3d_properties(pose[1][2])
if buff>25:
buff=0
plt.draw()
buff += 1
plt.ioff()
plt.show() | gpl-2.0 |
hainm/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
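# Hypothetical usage sketch (kept out of the actual benchmark below): any callable with
# the sample(n_population, n_samples) signature can be timed with bench_sample. Here a
# numpy permutation-based sampler is wrapped the same way the script does later on.
def _bench_sample_usage_example():
    def numpy_sampler(n_population, n_samples):
        return np.random.permutation(n_population)[:n_samples]
    return bench_sample(numpy_sampler, n_population=1000, n_samples=100)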
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
afruizc/microsoft_malware_challenge | src/models/first_model/get_conf_matrix.py | 2 | 2842 | """
This is a script that is used to generate a confusion matrix for
a classification method. This uses 10-fold cross-validation in
order to provide sensible results and not overfit.
"""
__author__ = "Andres Ruiz"
__license__ = "Apache"
__email__ = "afruizc __thingy__ cs unm edu"
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix, accuracy_score, log_loss
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import svm_bow
def plot_confusion_matrix(cm, title='Confusion matrix', normalized=True,
cmap=plt.cm.Oranges, save_file=""):
"""
Displays the confussion matrix indicated by `cm`. If argument
`normalized` is Ture, then the matrix is normalized. Optionally
the image can be saved to a file
Arguments:
----------
`cm`: The confusion matrix to be displayed.
`title`: The title for the window.
`normalized`: If True, normalizes the matrix before showing it.
`cmap`: Colormap to use.
    `save_file`: If a non-empty string, the resulting image is
    stored in that file.
"""
if normalized:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if save_file:
plt.savefig(save_file)
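# Illustrative sketch with made-up counts: the row normalization used above divides
# each row by its sum, turning raw counts into per-true-class rates, so every row of
# the normalized matrix sums to one.
def _row_normalization_example():
    cm = np.array([[8., 2.],
                   [1., 9.]])
    return cm / cm.sum(axis=1)[:, np.newaxis]  # [[0.8, 0.2], [0.1, 0.9]]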
def get_indices(data, indices):
result = []
for i in indices:
result.append(data[i])
return result
def main():
e = svm_bow.Executor()
e.load_data()
e.config_model()
fold = KFold(len(e.train['data']), n_folds=10)
conf_mat_avg = np.zeros((9, 9))
c = 0
for train, test in fold:
X_train = get_indices(e.train['data'], train)
X_test = get_indices(e.train['data'], test)
y_train = get_indices(e.train['target'], train)
y_test = get_indices(e.train['target'], test)
c += 1
print("Fitting run {}.".format(c))
model = e.param_tunning.fit(X_train, y_train)
print("Predicting...")
y_pred = model.predict(X_test)
y_pred_prob = model.predict_proba(X_test)
conf_matrix = confusion_matrix(y_test, y_pred)
        accuracy = accuracy_score(y_test, y_pred)
loss = log_loss(y_test, y_pred_prob)
plot_confusion_matrix(conf_matrix,
save_file='fold_{}.png'.format(c))
np.savetxt('conf_matrix_fold{}'.format(c), conf_matrix)
print("Fold %d. Accuracy: %lf Loss: %lf" % (c, accruacy, loss))
conf_mat_avg += conf_matrix
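    # Note: conf_matrix.txt stores the confusion matrix summed over all folds;
    # it is divided by 10 below only to plot the per-fold average.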
np.savetxt('conf_matrix.txt', conf_mat_avg)
conf_mat_avg /= 10.0
plot_confusion_matrix(conf_mat_avg, save_file='final_cm.png')
if __name__ == '__main__':
main()
| apache-2.0 |
KDD-OpenSource/geox-young-academy | day-3/Kalman-filter_Mark.py | 1 | 1494 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017
@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt
#Define functions
def model(state_0,A,B):
state_1 = A*state_0 + np.random.normal(0,B)
return state_1
state_null=np.random.normal(0,0.4)
def observation_function(state,R):
obs=state+np.random.normal(0,R)
return obs
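# Scalar Kalman filter equations used below (all quantities are scalars):
#   forecast:  x1_hat = A*x0                 P1_hat = A*P0*A + B
#   gain:      K = P1_hat*H / (R + H*P1_hat*H)
#   analysis:  x1 = x1_hat - K*(H*x1_hat - y)    P1 = P1_hat - K*H*P1_hat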
def forecast(state_0,cov_0,A,B):
state_1=A*state_0
cov_1=A*cov_0*A+B
return state_1,cov_1
def analysis_formulas(state_1_hat,cov_1_hat,K,H,obs_0):
state_1 = state_1_hat - K*(H*state_1_hat - obs_0)
cov_1 = cov_1_hat - K*H*cov_1_hat
return state_1, cov_1
def kalman_gain(cov_1_hat,H,R):
K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1)
return K
#Initialize model parameters
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100
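# A: state-transition coefficient, H: observation operator, B: model (process)
# noise term, R: observation noise term, lev: number of time steps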
#Synthetic model
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5,0.1)
OBS_real[0] = observation_function(STATE_real[0],R)
for i in range (1,lev-1):
STATE_real[i] = model(STATE_real[i-1],0.4,0.01)
OBS_real[i] = observation_function(STATE_real[i],R)
#Kalman-filter
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range (1,lev-1):
(state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B)
K = kalman_gain(cov_hat,H,R)
(STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i])
plt.plot(STATE)
plt.plot(STATE_real)
| mit |
eteq/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
| bsd-3-clause |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | 1 | 2522 | from __future__ import print_function
import string
from matplotlib import inspect
class FormatDict(dict):
"""Adapted from http://stackoverflow.com/questions/11283961/partial-string-formatting"""
def __missing__(self, key):
return "{" + key + "}"
class DocReplacer(object):
"""Decorator object for replacing patterns in docstrings using string.format."""
def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
'''
Parameters
-------------
        auto_dedent : bool
            Flag for automatically dedenting (cleaning) the docstring before replacement.
        allow_partial_formatting : bool
            Enables partial formatting (i.e., not all keys are available in the dictionary)
        doc_dict : kwargs
            Each key names a pattern in the docstring that will be replaced by the corresponding value.
Example
-------------
TODO: Update this documentation
        @DocReplacer(p1='p1 : int\n\tFirst parameter')
def foo(p1):
"""
Some functions.
Params:
{p1}
"""
will result in foo's docstring being:
"""
Some functions.
Params:
p1 : int
First parameter
"""
'''
self.doc_dict = doc_dict
self.auto_dedent = auto_dedent
self.allow_partial_formatting = allow_partial_formatting
def __call__(self, func):
if func.__doc__:
doc = func.__doc__
if self.auto_dedent:
doc = inspect.cleandoc(doc)
func.__doc__ = self._format(doc)
return func
def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
            if '{' in v and '}' in v:
self.doc_dict[k] = v.format(**doc_dict)
def update(self, *args, **kwargs):
"Assume self.params is a dict and update it with supplied args"
self.doc_dict.update(*args, **kwargs)
def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping)
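# Minimal usage sketch (illustrative; the names below are hypothetical):
#   common_doc = DocReplacer(data="data : array-like\n    Input events.")
#
#   @common_doc
#   def gate(data):
#       """Apply a gate to {data}."""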
| mit |
alalbiol/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
import numpy as np
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
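    # Qt edit support: convert the incoming QVariant to the column's dtype
    # before writing the value back into the underlying DataFrame.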
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
pedro-aaron/stego-chi-2 | embeddingRgb.py | 1 | 2081 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
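# Embed one bit into a color channel by forcing its parity (least significant
# bit) to match the bit: odd value & bit 0 -> decrement, even value & bit 1 -> increment.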
def marcarPixel(color, bitporinsertar):
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
    ax1.set_title('RGB image')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# watermarking process
imgMarcada = imgOriginal.copy()
cont = 1  # counter of the number of embedded bits
# embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Percentage of the image watermarked: ' + str(porcentajeDeimagenPorMarcar)+'%')
print('Embedded bits: ' + str(sizeMarca*3))
print('Embedded bytes: ' + str(sizeMarca*3/8))
print('Embedded kilobytes: ' + str(sizeMarca*3/8/1024))
print('Embedded megabytes: ' + str(sizeMarca*3/8/1024/1024))
| mit |
breznak/NAB | nab/labeler.py | 8 | 16181 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import itertools
import numpy
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.util import (absoluteFilePaths,
getProbationPeriod,
strf,
strp,
deepmap,
createPath,
writeJSON)
def bucket(rawTimes, buffer):
"""
Buckets (groups) timestamps that are within the amount of time specified by
buffer.
"""
bucket = []
rawBuckets = []
current = None
for t in rawTimes:
if current is None:
current = t
bucket = [current]
continue
if (t - current) <= buffer:
bucket.append(t)
else:
rawBuckets.append(bucket)
current = t
bucket = [current]
if bucket:
rawBuckets.append(bucket)
return rawBuckets
def merge(rawBuckets, threshold):
"""
Merges bucketed timestamps into one timestamp (most frequent, or earliest).
"""
truths = []
passed = []
for bucket in rawBuckets:
if len(bucket) >= threshold:
truths.append(max(bucket, key=bucket.count))
else:
passed.append(bucket)
return truths, passed
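# Illustrative example (plain integers stand in for timestamps / timedeltas):
#   bucket([1, 3, 20, 22, 23], buffer=5) -> [[1, 3], [20, 22, 23]]
#   merge([[1, 3], [20, 22, 23]], threshold=2) -> ([1, 20], [])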
def checkForOverlap(labels, buffer, labelsFileName, dataFileName):
"""
Raise a ValueError if the difference between any consecutive labels is smaller
than the buffer.
"""
for i in xrange(len(labels)-1):
if labels[i+1] - labels[i] <= buffer:
# import pdb; pdb.set_trace()
raise ValueError("The labels {} and {} in \'{}\' labels for data file "
"\'{}\' are too close to each other to be considered distinct "
"anomalies. Please relabel."
.format(labels[i], labels[i+1], labelsFileName, dataFileName))
class CorpusLabel(object):
"""
Class to store and manipulate a single set of labels for the whole
benchmark corpus.
"""
def __init__(self, path, corpus):
"""
Initializes a CorpusLabel object by getting the anomaly windows and labels.
When this is done for combining raw user labels, we skip getLabels()
because labels are not yet created.
@param path (string) Name of file containing the set of labels.
@param corpus (nab.Corpus) Corpus object.
"""
self.path = path
self.windows = None
self.labels = None
self.corpus = corpus
self.getWindows()
if "raw" not in self.path:
# Do not get labels from files in the path nab/labels/raw
self.getLabels()
def getWindows(self):
"""
Read JSON label file. Get timestamps as dictionaries with key:value pairs of
a relative path and its corresponding list of windows.
"""
def found(t, data):
f = data["timestamp"][data["timestamp"] == pandas.tslib.Timestamp(t)]
exists = (len(f) == 1)
return exists
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
data = self.corpus.dataFiles[relativePath].data
if "raw" in self.path:
timestamps = windows[relativePath]
else:
timestamps = list(itertools.chain.from_iterable(windows[relativePath]))
# Check that timestamps are present in dataset
if not all([found(t,data) for t in timestamps]):
raise ValueError("In the label file %s, one of the timestamps used for "
"the datafile %s doesn't match; it does not exist in "
"the file. Timestamps in json label files have to "
"exactly match timestamps in corresponding datafiles."
% (self.path, relativePath))
def validateLabels(self):
"""
This is run at the end of the label combining process (see
scripts/combine_labels.py) to validate the resulting ground truth windows,
specifically that they are distinct (unique, non-overlapping).
"""
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
num_windows = len(self.windows[relativePath])
if num_windows > 1:
if not all([(self.windows[relativePath][i+1][0]
- self.windows[relativePath][i][1]).total_seconds() >= 0
for i in xrange(num_windows-1)]):
raise ValueError("In the label file %s, windows overlap." % self.path)
def getLabels(self):
"""
Get Labels as a dictionary of key-value pairs of a relative path and its
corresponding binary vector of anomaly labels. Labels are simply a more
verbose version of the windows.
"""
self.labels = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if self.windows.has_key(relativePath):
windows = self.windows[relativePath]
labels = pandas.DataFrame({"timestamp": dataSet.data["timestamp"]})
labels['label'] = 0
for t1, t2 in windows:
moreThanT1 = labels[labels["timestamp"] >= t1]
betweenT1AndT2 = moreThanT1[moreThanT1["timestamp"] <= t2]
indices = betweenT1AndT2.loc[:,"label"].index
labels["label"].values[indices.values] = 1
self.labels[relativePath] = labels
else:
print "Warning: no label for datafile",relativePath
class LabelCombiner(object):
"""
This class is used to combine labels from multiple human labelers, and the set
of manual labels (known anomalies).
The output is a single ground truth label file containing anomalies where
there is enough human agreement. The class also computes the window around
each anomaly. The exact logic is described elsewhere in the NAB
documentation.
"""
def __init__(self, labelDir, corpus,
threshold, windowSize,
probationaryPercent, verbosity):
"""
@param labelDir (string) A directory name containing user label files.
This directory should contain one label file
per human labeler.
@param corpus (Corpus) Instance of Corpus class.
@param threshold (float) A percentage between 0 and 1, specifying the
agreement threshold. It describes the level
of agreement needed between individual
labelers before a particular point in a
data file is labeled as anomalous in the
combined file.
@param windowSize (float) Estimated size of an anomaly window, as a
ratio the dataset length.
@param verbosity (int) 0, 1, or 2 to print out select labeling
metrics; 0 is none, 2 is the most.
"""
self.labelDir = labelDir
self.corpus = corpus
self.threshold = threshold
self.windowSize = windowSize
self.probationaryPercent = probationaryPercent
self.verbosity = verbosity
self.userLabels = None
self.nLabelers = None
self.knownLabels = None
self.combinedWindows = None
def __str__(self):
ans = ""
ans += "labelDir: %s\n" % self.labelDir
ans += "corpus: %s\n" % self.corpus
ans += "number of labelers: %d\n" % self.nLabelers
ans += "agreement threshold: %d\n" % self.threshold
return ans
def write(self, labelsPath, windowsPath):
"""Write the combined labels and windows to destination directories."""
if not os.path.isdir(labelsPath):
createPath(labelsPath)
if not os.path.isdir(windowsPath):
createPath(windowsPath)
writeJSON(labelsPath, self.labelTimestamps)
writeJSON(windowsPath, self.combinedWindows)
def combine(self):
"""Combine raw and known labels in anomaly windows."""
self.getRawLabels()
self.combineLabels()
self.editPoorLabels()
self.applyWindows()
self.checkWindows()
def getRawLabels(self):
"""Collect the raw user labels from specified directory."""
labelPaths = absoluteFilePaths(self.labelDir)
self.userLabels = []
self.knownLabels = []
for path in labelPaths:
if "known" in path:
self.knownLabels.append(CorpusLabel(path, self.corpus))
else:
self.userLabels.append(CorpusLabel(path, self.corpus))
self.nLabelers = len(self.userLabels)
if self.nLabelers == 0:
raise ValueError("No users labels found")
def combineLabels(self):
"""
Combines raw user labels to create set of true anomaly labels.
A buffer is used to bucket labels that identify the same anomaly. The buffer
is half the estimated window size of an anomaly -- approximates an average
of two anomalies per dataset, and no window can have > 1 anomaly.
After bucketing, a label becomes a true anomaly if it was labeled by a
proportion of the users greater than the defined threshold. Then the bucket
is merged into one timestamp -- the ground truth label.
The set of known anomaly labels are added as well. These have been manually
labeled because we know the direct causes of the anomalies. They are added
as if they are the result of the bucket-merge process.
If verbosity > 0, the dictionary passedLabels -- the raw labels that did not
pass the threshold qualification -- is printed to the console.
"""
def setTruthLabels(dataSet, trueAnomalies):
"""Returns the indices of the ground truth anomalies for a data file."""
timestamps = dataSet.data["timestamp"]
labels = numpy.array(timestamps.isin(trueAnomalies), dtype=int)
return [i for i in range(len(labels)) if labels[i]==1]
self.labelTimestamps = {}
self.labelIndices = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if ("Known" in relativePath) or ("artificial" in relativePath):
knownAnomalies = self.knownLabels[0].windows[relativePath]
self.labelTimestamps[relativePath] = [str(t) for t in knownAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, knownAnomalies)
continue
# Calculate the window buffer -- used for bucketing labels identifying
# the same anomaly.
granularity = dataSet.data["timestamp"][1] - dataSet.data["timestamp"][0]
buffer = datetime.timedelta(minutes=
granularity.total_seconds()/60 * len(dataSet.data) * self.windowSize/10)
rawTimesLists = []
userCount = 0
for user in self.userLabels:
if relativePath in user.windows:
# the user has labels for this file
checkForOverlap(
user.windows[relativePath], buffer, user.path, relativePath)
rawTimesLists.append(user.windows[relativePath])
userCount += 1
if not rawTimesLists:
# no labeled anomalies for this data file
self.labelTimestamps[relativePath] = []
self.labelIndices[relativePath] = setTruthLabels(dataSet, [])
continue
else:
rawTimes = list(itertools.chain.from_iterable(rawTimesLists))
rawTimes.sort()
# Bucket and merge the anomaly timestamps.
threshold = userCount * self.threshold
trueAnomalies, passedAnomalies = merge(
bucket(rawTimes, buffer), threshold)
self.labelTimestamps[relativePath] = [str(t) for t in trueAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, trueAnomalies)
if self.verbosity>0:
print "----"
print "For %s the passed raw labels and qualified true labels are,"\
" respectively:" % relativePath
print passedAnomalies
print trueAnomalies
return self.labelTimestamps, self.labelIndices
def editPoorLabels(self):
"""
This edits labels that have been flagged for manual revision. From
inspecting the data and anomaly windows, we have determined some combined
labels should be revised, or not included in the ground truth labels.
"""
count = 0
for relativePath, indices in self.labelIndices.iteritems():
if "iio_us-east-1_i-a2eb1cd9_NetworkIn" in relativePath:
self.labelIndices[relativePath] = [249, 339]
count += len(indices)
if self.verbosity > 0:
print "============================================================="
print "Total ground truth anomalies in benchmark dataset =", count
def applyWindows(self):
"""
This takes all the true anomalies, as calculated by combineLabels(), and
adds a standard window. The window length is the class variable windowSize,
and the location is centered on the anomaly timestamp.
If verbosity = 2, the window metrics are printed to the console.
"""
allWindows = {}
for relativePath, anomalies in self.labelIndices.iteritems():
data = self.corpus.dataFiles[relativePath].data
length = len(data)
num = len(anomalies)
if num:
windowLength = int(self.windowSize * length / len(anomalies))
else:
windowLength = int(self.windowSize * length)
if self.verbosity==2:
print "----"
print "Window metrics for file", relativePath
print "file length =", length, ";" \
"number of windows =", num, ";" \
"window length =", windowLength
windows = []
for a in anomalies:
front = max(a - windowLength/2, 0)
back = min(a + windowLength/2, length-1)
windowLimit = [strf(data["timestamp"][front]),
strf(data["timestamp"][back])]
windows.append(windowLimit)
allWindows[relativePath] = windows
self.combinedWindows = allWindows
def checkWindows(self):
"""
This takes the anomaly windows and checks for overlap with both each other
and with the probationary period. Overlapping windows are merged into a
single window. Windows overlapping with the probationary period are deleted.
"""
for relativePath, windows in self.combinedWindows.iteritems():
numWindows = len(windows)
if numWindows > 0:
fileLength = self.corpus.dataFiles[relativePath].data.shape[0]
probationIndex = getProbationPeriod(
self.probationaryPercent, fileLength)
probationTimestamp = self.corpus.dataFiles[relativePath].data[
"timestamp"][probationIndex]
if (pandas.to_datetime(windows[0][0])
-probationTimestamp).total_seconds() < 0:
del windows[0]
print ("The first window in {} overlaps with the probationary period "
", so we're deleting it.".format(relativePath))
i = 0
while len(windows)-1 > i:
if (pandas.to_datetime(windows[i+1][0])
- pandas.to_datetime(windows[i][1])).total_seconds() <= 0:
# merge windows
windows[i] = [windows[i][0], windows[i+1][1]]
del windows[i+1]
i += 1
| agpl-3.0 |
rgllm/uminho | 04/CN/TP3/src/src/parser/PsoTools.py | 1 | 4783 | import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
	# Convert a raw data file to a JSON file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
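		# Raw file layout, in the order parsed below: number of customers, number of
		# depots, blank line, depot coordinates, customer coordinates, vehicle
		# capacity, depot capacities, customer demands, depot opening costs,
		# vehicle cost, and cost type.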
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
		# Depot coordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
		# Customer coordinates and vehicle capacity
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
		# Vehicle and depot capacities
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
		# Customer demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
		# Depot opening costs
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
| mit |
gpersistence/tstop | python/persistence/PartitionData.py | 1 | 8153 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import os
import sys
import argparse
from math import ceil
import numpy
from sklearn.cross_validation import StratifiedKFold
from Datatypes.JSONObject import load_data, save_data
from Datatypes.Segments import SegmentInfo
from Datatypes.Configuration import Configuration
from Datatypes.TrainTestPartitions import TrainTestPartition, TrainTestPartitions
def PartitionData(segment_info, split, avoid_overlap=False, segment_size=0,
file_based=False, preserve_labels=False, override_preset=False, surpress_warning=False, seed=None) :
'''
Accepts a list of Datatype.Segments.SegmentInfo and a float between 0 and 1,
and outputs a pair of lists of indices, (train, test) corresponding
to a parition of the input list
len(train) approximates split * len(segment_info)
Intersection of train and test is an empty set
Union of train and test is not guaranteed to be range(len(segment_info))
Optional arguments:
avoid_overlap omits entries in test that would have overlapping data with entries in train,
as indicated by the range [segment_start:segment_start+segment_size]
segment_size interacts with avoid overlap, because only segment_start is contained in the
SegmentInfo class
file_based creates partitions where segments with the same filename for source data are
in the same partition
    preserve_labels tries to split the populations of labels evenly
'''
segment_count = len(segment_info)
segment_range = range(segment_count)
# check to see if we have a preset train / test split for all data and we aren't overriding that
if not override_preset and [0 for s in segment_info if s.learning == None] == [] :
return TrainTestPartition([i for i in segment_range if segment_info[i].learning == 'train'],
[i for i in segment_range if segment_info[i].learning == 'test'], None)
train_goal_len = int(ceil(segment_count * split))
if preserve_labels :
labels = [s.max_label() for s in segment_info]
label_set = list(set(labels))
label_count = [(l0,len([l for l in labels if l == l0])) for l0 in label_set]
label_goal = [(str(l), int(round(c * split))) for (l,c) in label_count]
for ((l0,g),(l1,c)) in zip(label_goal, label_count) :
            if ((g == 0) or (g == c)) and not surpress_warning:
print "PartitionData warning: not enough entries (%d) of label %s to properly make a train / test split of ratio %s" % (c, l0, split)
label_goal = dict(label_goal)
train = []
test = []
if seed != None :
random.seed(seed)
state = random.getstate()
if file_based :
files = list(set([s.filename for s in segment_info]))
random.shuffle(files)
for f in files :
f_indices = [x for (x,y) in zip(segment_range, segment_info) if y.filename == f]
if preserve_labels :
f_labels = [str(labels[i]) for i in f_indices]
extend_train = True
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
if count > label_goal[l] :
extend_train = False
break
if extend_train :
train.extend(f_indices)
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
label_goal[l] = label_goal[l] - count
else :
test.extend(f_indices)
else :
if len(train) + len(f_indices) < train_goal_len :
train.extend(f_indices)
else :
test.extend(f_indices)
else :
random.shuffle(segment_range)
if preserve_labels :
for i in segment_range:
l = str(labels[i])
if label_goal[l] > 0 :
train.append(i)
label_goal[l] = label_goal[l] - 1
else :
test.append(i)
else :
train = segment_range[0:train_goal_len]
test = segment_range[train_goal_len:]
return TrainTestPartition(train,test,state)
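# For example, PartitionData(segment_info, 0.8, preserve_labels=True) returns a
# TrainTestPartition whose train indices cover roughly 80% of the segments with
# label proportions approximately preserved.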
def generate_partitions(config, segment_info, cv_iterations=0, seed=None) :
partition = PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=seed)
all_labels = [segment_info[i].max_label() for i in partition.train]
if cv_iterations > 0 :
skf = StratifiedKFold(all_labels, n_folds=cv_iterations)
cross_validation = [TrainTestPartition([partition.train[i] for i in train_index],
[partition.train[i] for i in test_index], None) \
for train_index, test_index in skf]
else :
cross_validation = None
learning_trials = [PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=None) for i in range(config.learning_iterations)]
return TrainTestPartitions(config, segment_info, cross_validation, learning_trials)
if __name__ == "__main__" :
parser = argparse.ArgumentParser("Tool to generate train / test splits for testing and cross validation")
parser.add_argument("--segments", "-i")
parser.add_argument("--outfile", "-o")
parser.add_argument("--learning-split", "-s", type=float)
parser.add_argument("--learning-iterations", "-I", type=int)
parser.add_argument("--cv-iterations", "-v", default=5, type=int)
parser.add_argument("--seed", "-S")
args = parser.parse_args(sys.argv[1:])
segments_json = load_data(args.segments, 'segments', None, None, sys.argv[0] + " : ")
if segments_json == None :
print "Could not load Segments from %s" % (args.segments,)
sys.exit(1)
segment_info = [SegmentInfo.fromJSONDict(s) for s in segments_json['segments']]
config = Configuration.fromJSONDict(segments_json['config'])
if args.learning_split != None :
config.learning_split = args.learning_split
if args.learning_iterations != None :
config.learning_iterations = args.learning_iterations
output = generate_partitions(config, segment_info, cv_iterations=args.cv_iterations, seed=args.seed)
if args.outfile == None :
args.outfile = TrainTestPartitions.get_partition_filename(config)
print "Writing %s" % (args.outfile,)
save_data(args.outfile, output.toJSONDict())
| gpl-3.0 |
aabadie/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet process model adapts its number of
states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/io/parser/test_textreader.py | 4 | 11387 | # -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import pytest
from pandas.compat import StringIO, BytesIO, map
from pandas import compat
import os
import sys
from numpy import nan
import numpy as np
from pandas import DataFrame
from pandas.io.parsers import (read_csv, TextFileReader)
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas._libs.parsers import TextReader
import pandas._libs.parsers as parser
class TestTextReader(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
@tm.capture_stderr
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
pytest.raises(parser.ParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object),
1: np.array(['b', 'e', 'h', 'm'], dtype=object),
2: np.array(['c', 'f', 'i', 'n'], dtype=object)}
assert_array_dicts_equal(result, expected)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
assert 'Skipping line 4' in val
assert 'Skipping line 6' in val
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
assert header == expected
recs = reader.read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64)}
assert_array_dicts_equal(recs, expected)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
assert result[0].dtype == 'S5'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'i4'
reader = _make_reader(dtype='S4')
result = reader.read()
assert result[0].dtype == 'S4'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'S4'
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'S1'
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None,
names=['a', 'b', 'c'])
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
assert tm.assert_numpy_array_equal(np.asarray(v),
np.asarray(right[k]))
| bsd-3-clause |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch09/fft.py | 24 | 3673 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sys
import os
import glob
import numpy as np
import scipy
import scipy.io.wavfile
from utils import GENRE_DIR, CHART_DIR
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
def write_fft(fft_features, fn):
"""
Write the FFT features to separate files to speed up processing.
"""
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print("Written "%data_fn)
def create_fft(fn):
sample_rate, X = scipy.io.wavfile.read(fn)
fft_features = abs(scipy.fft(X)[:1000])
write_fft(fft_features, fn)
def read_fft(genre_list, base_dir=GENRE_DIR):
X = []
y = []
for label, genre in enumerate(genre_list):
genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
file_list = glob.glob(genre_dir)
assert(file_list), genre_dir
for fn in file_list:
fft_features = np.load(fn)
X.append(fft_features[:2000])
y.append(label)
return np.array(X), np.array(y)
def plot_wav_fft(wav_filename, desc=None):
plt.clf()
plt.figure(num=None, figsize=(6, 4))
sample_rate, X = scipy.io.wavfile.read(wav_filename)
spectrum = np.fft.fft(X)
freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)
plt.subplot(211)
num_samples = 200.0
plt.xlim(0, num_samples / sample_rate)
plt.xlabel("time [s]")
plt.title(desc or wav_filename)
plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
plt.grid(True)
plt.subplot(212)
plt.xlim(0, 5000)
plt.xlabel("frequency [Hz]")
plt.xticks(np.arange(5) * 1000)
if desc:
desc = desc.strip()
fft_desc = desc[0].lower() + desc[1:]
else:
fft_desc = wav_filename
plt.title("FFT of %s" % fft_desc)
plt.plot(freq, abs(spectrum), linewidth=5)
plt.grid(True)
plt.tight_layout()
rel_filename = os.path.split(wav_filename)[1]
plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
bbox_inches='tight')
plt.show()
def plot_wav_fft_demo():
plot_wav_fft("sine_a.wav", "400Hz sine wave")
plot_wav_fft("sine_b.wav", "3,000Hz sine wave")
plot_wav_fft("sine_mix.wav", "Mixed sine wave")
def plot_specgram(ax, fn):
sample_rate, X = scipy.io.wavfile.read(fn)
ax.specgram(X, Fs=sample_rate, xextent=(0, 30))
def plot_specgrams(base_dir=CHART_DIR):
"""
Plot a bunch of spectrograms of wav files in different genres
"""
plt.clf()
genres = ["classical", "jazz", "country", "pop", "rock", "metal"]
num_files = 3
f, axes = plt.subplots(len(genres), num_files)
for genre_idx, genre in enumerate(genres):
for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))):
if idx == num_files:
break
axis = axes[genre_idx, idx]
axis.yaxis.set_major_formatter(EngFormatter())
axis.set_title("%s song %i" % (genre, idx + 1))
plot_specgram(axis, fn)
specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png")
plt.savefig(specgram_file, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
# for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")):
# create_fft(fn)
# plot_decomp()
if len(sys.argv) > 1:
plot_wav_fft(sys.argv[1], desc="some sample song")
else:
plot_wav_fft_demo()
plot_specgrams()
| mit |
toastedcornflakes/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
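# Note: expected_X_norm above equals std(X, axis=0) * sqrt(n_samples), i.e. the
# L2 norm of each centered column, which is the per-column scaling applied when
# normalize=True, as the assertions in test_preprocess_data verify.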
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
mupif/mupif | mupif/Field.py | 1 | 42683 | #
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from builtins import range
from builtins import object
from . import Cell
from . import FieldID
from . import ValueType
from . import BBox
from . import APIError
from . import MupifObject
from . import Mesh
from .Physics import PhysicalQuantities
from .Physics.PhysicalQuantities import PhysicalQuantity
from numpy import array, arange, random, zeros
import numpy
import copy
import Pyro4
from enum import IntEnum
import logging
log = logging.getLogger()
try:
import cPickle as pickle # faster serialization if available
except:
import pickle
# import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute
# lookup thread.lock failed
# debug flag
debug = 0
class FieldType(IntEnum):
"""
Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased.
"""
FT_vertexBased = 1
FT_cellBased = 2
@Pyro4.expose
class Field(MupifObject.MupifObject, PhysicalQuantity):
"""
Representation of field. Field is a scalar, vector, or tensorial
quantity defined on a spatial domain. The field, however is assumed
to be fixed at certain time. The field can be evaluated in any spatial point
belonging to underlying domain.
Derived classes will implement fields defined on common discretizations,
like fields defined on structured/unstructured FE meshes, FD grids, etc.
.. automethod:: __init__
.. automethod:: _evaluate
"""
def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}):
"""
Initializes the field instance.
:param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization
:param FieldID fieldID: Field type (displacement, strain, temperature ...)
:param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically.
:param Physics.PhysicalUnits units: Field value units
:param Physics.PhysicalQuantity time: Time associated with field values
:param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value)
:type values: list of tuples representing individual values
:param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased
:param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0
:param dict metaData: Optionally pass metadata for merging
"""
super(Field, self).__init__()
self.mesh = mesh
self.fieldID = fieldID
self.valueType = valueType
self.time = time
self.uri = None # pyro uri; used in distributed setting
# self.log = logging.getLogger()
self.fieldType = fieldType
self.objectID = objectID
if values is None:
if self.fieldType == FieldType.FT_vertexBased:
ncomponents = mesh.getNumberOfVertices()
else:
ncomponents = mesh.getNumberOfCells()
self.value = zeros((ncomponents, self.getRecordSize()))
else:
self.value = values
if PhysicalQuantities.isPhysicalUnit(units):
self.unit = units
else:
self.unit = PhysicalQuantities.findUnit(units)
self.setMetadata('Units', self.unit.name())
self.setMetadata('Type', 'mupif.Field.Field')
self.setMetadata('Type_ID', str(self.fieldID))
self.setMetadata('FieldType', str(fieldType))
self.setMetadata('ValueType', str(self.valueType))
self.updateMetadata(metaData)
@classmethod
def loadFromLocalFile(cls, fileName):
"""
Alternative constructor which loads instance directly from a Pickle module.
:param str fileName: File name
:return: Returns Field instance
:rtype: Field
"""
return pickle.load(open(fileName, 'rb'))
def getRecordSize(self):
"""
Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance.
:return: number of scalars (1,3,9 respectively for scalar, vector, tensor)
:rtype: int
"""
if self.valueType == ValueType.Scalar:
return 1
elif self.valueType == ValueType.Vector:
return 3
elif self.valueType == ValueType.Tensor:
return 9
else:
raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType)
def getMesh(self):
"""
Obtain mesh.
:return: Returns a mesh of underlying discretization
:rtype: Mesh.Mesh
"""
return self.mesh
def getValueType(self):
"""
Returns ValueType of the field, e.g. scalar, vector, tensor.
:return: Returns value type of the receiver
:rtype: ValueType
"""
return self.valueType
def getFieldID(self):
"""
Returns FieldID, e.g. FID_Displacement, FID_Temperature.
:return: Returns field ID
:rtype: FieldID
"""
return self.fieldID
def getFieldIDName(self):
"""
Returns name of the field.
:return: Returns fieldID name
:rtype: string
"""
return self.fieldID.name
def getFieldType(self):
"""
Returns receiver field type (values specified as vertex or cell values)
:return: Returns fieldType id
:rtype: FieldType
"""
return self.fieldType
def getTime(self):
"""
Get time of the field.
:return: Time of field data
:rtype: Physics.PhysicalQuantity
"""
return self.time
def evaluate(self, positions, eps=0.0):
"""
Evaluates the receiver at given spatial position(s).
:param positions: 1D/2D/3D position vectors
:type positions: tuple, a list of tuples
:param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used)
:return: field value(s)
:rtype: Physics.PhysicalQuantity with given value or tuple of values
"""
# test if positions is a list of positions
if isinstance(positions, list):
ans = []
for pos in positions:
ans.append(self._evaluate(pos, eps))
return PhysicalQuantity(ans, self.unit)
else:
# single position passed
return PhysicalQuantity(self._evaluate(positions, eps), self.unit)
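    # Illustrative usage sketch (hypothetical coordinates; assumes the field's
    # mesh covers the queried points):
    #   t = field.evaluate((0.5, 0.2, 0.0))                 # single point
    #   ts = field.evaluate([(0., 0., 0.), (1., 0., 0.)])   # list of points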
def _evaluate(self, position, eps):
"""
Evaluates the receiver at a single spatial position.
:param tuple position: 1D/2D/3D position vector
:param float eps: Optional tolerance
:return: field value
:rtype: tuple of doubles
.. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ .
"""
cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position]))
# answer=None
if len(cells):
if self.fieldType == FieldType.FT_vertexBased:
for icell in cells:
try:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()])
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
raise
return answer
except ZeroDivisionError:
print('ZeroDivisionError?')
log.debug(icell.number)
log.debug(position)
icell.debug = 1
log.debug(icell.containsPoint(position), icell.glob2loc(position))
log.error('Field::evaluate - no source cell found for position %s' % str(position))
for icell in cells:
log.debug(icell.number)
log.debug(icell.containsPoint(position))
log.debug(icell.glob2loc(position))
else: # if (self.fieldType == FieldType.FT_vertexBased):
# in case of cell based fields do compute average of cell values containing point
# this typically happens when point is on the shared edge or vertex
count = 0
for icell in cells:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
tmp = self.value[icell.number]
if count == 0:
answer = list(tmp)
else:
                                answer = [x + y for x, y in zip(answer, tmp)]
count += 1
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
log.error(icell.getVertices())
raise
# end loop over icells
if count == 0:
log.error('Field::evaluate - no source cell found for position %s', str(position))
# for icell in cells:
# log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position))
else:
answer = [x/count for x in answer]
return answer
else:
# no source cell found
log.error('Field::evaluate - no source cell found for position ' + str(position))
raise ValueError('Field::evaluate - no source cell found for position ' + str(position))
def getVertexValue(self, vertexID):
"""
Returns the value associated with a given vertex.
:param int vertexID: Vertex identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_vertexBased:
return PhysicalQuantity(self.value[vertexID], self.unit)
else:
            raise TypeError('Attempt to access vertex value of cell based field, use evaluate instead')
def getCellValue(self, cellID):
"""
Returns the value associated with a given cell.
:param int cellID: Cell identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_cellBased:
return PhysicalQuantity(self.value[cellID], self.unit)
else:
            raise TypeError('Attempt to access cell value of vertex based field, use evaluate instead')
def _giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
        Deprecated, use getVertexValue() or getCellValue() instead.
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: Physics.PhysicalQuantity
"""
return PhysicalQuantity(self.value[componentID], self.unit)
def giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: tuple
"""
return self.value[componentID]
def setValue(self, componentID, value):
"""
Sets the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:param tuple value: Value to be set for a given component, should have the same units as receiver
.. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked.
"""
self.value[componentID] = value
def commit(self):
"""
Commits the recorded changes (via setValue method) to a primary field.
"""
def getObjectID(self):
"""
Returns field objectID.
:return: Object's ID
:rtype: int
"""
return self.objectID
def getUnits(self):
"""
:return: Returns units of the receiver
:rtype: Physics.PhysicalUnits
"""
return self.unit
def merge(self, field):
"""
Merges the receiver with given field together. Both fields should be on different parts of the domain (can also overlap), but should refer to same underlying discretization, otherwise unpredictable results can occur.
:param Field field: given field to merge with.
"""
# first merge meshes
mesh = copy.deepcopy(self.mesh)
mesh.merge(field.mesh)
log.debug(mesh)
# merge the field values
# some type checking first
if self.fieldType != field.fieldType:
raise TypeError("Field::merge: fieldType of receiver and parameter is different")
if self.fieldType == FieldType.FT_vertexBased:
values = [0]*mesh.getNumberOfVertices()
for v in range(self.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v]
else:
values = [0]*mesh.getNumberOfCells()
for v in range(self.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v]
self.mesh = mesh
self.value = values
def field2VTKData (self, name=None, lookupTable=None):
"""
Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module.
:param str name: human-readable name of the field
:param pyvtk.LookupTable lookupTable: color lookup table
:return: Instance of pyvtk
:rtype: pyvtk.VtkData
"""
import pyvtk
if name is None:
name = self.getFieldIDName()
if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable):
log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.')
lookupTable = None
if lookupTable is None:
lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm')
# Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking
# 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping.
scalarsKw = dict(name=name, lookup_table='default')
else:
scalarsKw = dict(name=name, lookup_table=lookupTable.name)
# see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example
vectorsKw = dict(name=name) # vectors don't have a lookup_table
if self.fieldType == FieldType.FT_vertexBased:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
else:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
def getMartixForTensor(self, values):
"""
Reshape values to a list with 3x3 arrays. Usable for VTK export.
:param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...]
:return: List containing 3x3 matrices for each tensor
:rtype: list
"""
tensor = []
for i in values:
tensor.append(numpy.reshape(i, (3, 3)))
return tensor
def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL):
"""
Dump Field to a file using a Pickle serialization module.
:param str fileName: File name
:param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary
"""
pickle.dump(self, open(fileName, 'wb'), protocol)
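    # Illustrative round-trip sketch (hypothetical file name):
    #   field.dumpToLocalFile('field.pickle')
    #   restored = Field.loadFromLocalFile('field.pickle')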
def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None):
"""
Plots and/or saves 2D image using a matplotlib library. Works for structured and unstructured 2D/3D fields. 2D/3D fields need to define plane. This method gives only basic viewing options, for aesthetic and more elaborated output use e.g. VTK field export with
postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1
:param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz'
:param tuple elevation: range of third coordinate. For example, in plane='xy' is grabs z coordinates in the range
:param int numX: number of divisions on x graph axis
:param int numY: number of divisions on y graph axis
:param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic'
:param int fieldComponent: component of the field
        :param bool vertex: if vertices should be plotted as points
:param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal'
:param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing.
        :param tuple barRange: min and max bar range. If barRange=(None, None), it is adjusted automatically
:param str barFormatNum: format of color bar numbers
:param str title: title
:param str xlabel: x axis label
:param str ylabel: y axis label
:param str fileName: if nonempty, a filename is written to the disk, usually png, pdf, ps, eps and svg are supported
:param bool show: if the plot should be showed
        :param tuple figsize: size of canvas in inches. Affects only showing a figure; saving to a file adjusts one side automatically.
        :param obj matPlotFig: handle to an existing matplotlib figure to draw into; None creates a new figure
:return: handle to matPlotFig
:rtype: matPlotFig
"""
try:
import numpy as np
import math
from scipy.interpolate import griddata
import matplotlib
matplotlib.use('TkAgg') # Qt4Agg gives an empty, black window
import matplotlib.pyplot as plt
except ImportError as e:
log.error('Skipping field2Image2D due to missing modules: %s' % e)
return None
# raise
if self.fieldType != FieldType.FT_vertexBased:
raise APIError.APIError('Only FieldType.FT_vertexBased is now supported')
mesh = self.getMesh()
numVertices = mesh.getNumberOfVertices()
indX = 0
indY = 0
elev = 0
if plane == 'xy':
indX = 0
indY = 1
elev = 2
elif plane == 'xz':
indX = 0
indY = 2
elev = 1
elif plane == 'yz':
indX = 1
indY = 2
elev = 0
# find eligible vertex points and values
vertexPoints = []
vertexValue = []
for i in range(0, numVertices):
coords = mesh.getVertex(i).getCoordinates()
# print(coords)
value = self.giveValue(i)[fieldComponent]
if elevation[1] > coords[elev] > elevation[0]:
vertexPoints.append((coords[indX], coords[indY]))
vertexValue.append(value)
if len(vertexPoints) == 0:
log.info('No valid vertex points found, putting zeros on domain 1 x 1')
for i in range(5):
vertexPoints.append((i % 2, i/4.))
vertexValue.append(0)
# for i in range (0, len(vertexPoints)):
# print (vertexPoints[i], vertexValue[i])
vertexPointsArr = np.array(vertexPoints)
vertexValueArr = np.array(vertexValue)
xMin = vertexPointsArr[:, 0].min()
xMax = vertexPointsArr[:, 0].max()
yMin = vertexPointsArr[:, 1].min()
yMax = vertexPointsArr[:, 1].max()
# print(xMin, xMax, yMin, yMax)
grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)]
grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp)
# print (grid_z1.T)
        plt.ion()  # interactive mode
if matPlotFig is None:
matPlotFig = plt.figure(figsize=figsize)
# plt.xlim(xMin, xMax)
# plt.ylim(yMin, yMax)
plt.clf()
plt.axis((xMin, xMax, yMin, yMax))
image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal')
# plt.margins(tight=True)
# plt.tight_layout()
# plt.margins(x=-0.3, y=-0.3)
if colorBar:
cbar = plt.colorbar(orientation=colorBar, format=barFormatNum)
if colorBarLegend is not None:
if colorBarLegend == '':
colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent)
if self.unit is not None:
colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')'
cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90)
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if vertex == 1:
plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10)
# plt.axis('equal')
# plt.gca().set_aspect('equal', adjustable='box-forced')
if isinstance(barRange[0], float) or isinstance(barRange[0], int):
image.set_clim(vmin=barRange[0], vmax=barRange[1])
if fileName:
plt.savefig(fileName, bbox_inches='tight')
if show:
matPlotFig.canvas.draw()
# plt.ioff()
# plt.show(block=True)
return matPlotFig
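    # Illustrative usage sketch (hypothetical file name and settings):
    #   fig = field.field2Image2D(plane='xy', fieldComponent=0,
    #                             fileName='temperature.png', show=False)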
def field2Image2DBlock(self):
"""
Block an open window from matPlotLib. Waits until closed.
"""
import matplotlib.pyplot as plt
plt.ioff()
plt.show(block=True)
def toHdf5(self, fileName, group='component1/part1'):
"""
Dump field to HDF5, in a simple format suitable for interoperability (TODO: document).
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be saved under.
The HDF hierarchy is like this::
group
|
+--- mesh_01 {hash=25aa0aa04457}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- mesh_02 {hash=17809e2b86ea}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- ...
+--- field_01
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_02
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_03
| +--- -> mesh_02
| \--- [cell_values]
\--- ...
where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data.
.. note:: This method has not been tested yet. The format is subject to future changes.
"""
import h5py
hdf = h5py.File(fileName, 'a', libver='latest')
if group not in hdf:
gg = hdf.create_group(group)
else:
gg = hdf[group]
# raise IOError('Path "%s" is already used in "%s".'%(path,fileName))
def lowestUnused(trsf, predicate, start=1):
"""
Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms
integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts
as argument. Lowest transformed value is returned.
"""
import itertools
for i in itertools.count(start=start):
t = trsf(i)
if not predicate(t):
return t
# save mesh (not saved if there already)
newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg)
mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp)
if self.value:
fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf))
fieldGrp['mesh'] = mh5
fieldGrp.attrs['fieldID'] = self.fieldID
fieldGrp.attrs['valueType'] = self.valueType
# string/bytes may not contain NULL when stored as string in HDF5
# see http://docs.h5py.org/en/2.3/strings.html
# that's why we cast to opaque type "void" and uncast using tostring before unpickling
fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit))
fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time))
# fieldGrp.attrs['time']=self.time.getValue()
if self.fieldType == FieldType.FT_vertexBased:
val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float)
for vert in range(self.getMesh().getNumberOfVertices()):
val[vert] = self.getVertexValue(vert).getValue()
fieldGrp['vertex_values'] = val
elif self.fieldType == FieldType.FT_cellBased:
# raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.")
val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float)
for cell in range(self.getMesh().getNumberOfCells()):
val[cell] = self.getCellValue(cell)
fieldGrp['cell_values'] = val
else:
raise RuntimeError("Unknown fieldType %d." % self.fieldType)
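    # Illustrative HDF5 round-trip sketch (hypothetical file name):
    #   field.toHdf5('results.h5', group='component1/part1')
    #   fields = Field.makeFromHdf5('results.h5', group='component1/part1')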
@staticmethod
def makeFromHdf5(fileName, group='component1/part1'):
"""
Restore Fields from HDF5 file.
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
.. note:: This method has not been tested yet.
"""
import h5py
hdf = h5py.File(fileName, 'r', libver='latest')
grp = hdf[group]
# load mesh and field data from HDF5
meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')]
fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')]
# construct all meshes as mupif objects
meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs]
# construct all fields as mupif objects
ret = []
for f in fieldObjs:
if 'vertex_values' in f:
fieldType, values = FieldType.FT_vertexBased, f['vertex_values']
elif 'cell_values' in f:
fieldType, values = FieldType.FT_cellBased, f['cell_values']
else:
                raise ValueError("HDF5/mupif format error: unable to determine field type.")
fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring()
if units == '':
units = None # special case, handled at saving time
else:
units = pickle.loads(units)
if time == '':
time = None # special case, handled at saving time
else:
time = pickle.loads(time)
meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to
ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType))
return ret
def toVTK2(self, fileName, format='ascii'):
"""
Save the instance as Unstructured Grid in VTK2 format (``.vtk``).
:param str fileName: where to save
:param str format: one of ``ascii`` or ``binary``
"""
self.field2VTKData().tofile(filename=fileName, format=format)
@staticmethod
def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']):
"""
Return fields stored in *fileName* in the VTK2 (``.vtk``) format.
:param str fileName: filename to load from
        :param PhysicalUnit unit: physical unit of field values
:param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered)
        :param [string,] skip: field names to be skipped when reading the input file; the default value skips the default coolwarm colormap.
        :returns: list of all fields found in the file
        :rtype: [Field,Field,...]
"""
import pyvtk
from .dataID import FieldID
if not fileName.endswith('.vtk'):
log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).')
ret = []
try:
data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk)
except NotImplementedError:
log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName)
return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True)
ugr = data.structure
if not isinstance(ugr, pyvtk.UnstructuredGrid):
raise NotImplementedError(
"grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__)
mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr)
# get cell and point data
pd, cd = data.point_data.data, data.cell_data.data
for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for d in dd:
# will raise KeyError if fieldID with that name is not defined
if d.name in skip:
continue
fid = FieldID[d.name]
# determine the number of components using the expected number of values from the mesh
expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells())
nc = len(d.scalars)//expectedNumVal
valueType = ValueType.fromNumberOfComponents(nc)
                values = [d.scalars[i*nc:i*nc+nc] for i in range(expectedNumVal)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=unit, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
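    # Illustrative VTK2 round-trip sketch (hypothetical file name; assumes 'K'
    # resolves via PhysicalQuantities.findUnit):
    #   field.toVTK2('field.vtk')
    #   fields = Field.makeFromVTK2('field.vtk', unit='K')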
def toVTK3(self, fileName, **kw):
"""
Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly.
:param fileName: output file name
:param ``**kw``: passed to :obj:`manyToVTK3`
"""
return self.manyToVTK3([self], fileName, **kw)
@staticmethod
def manyToVTK3(fields, fileName, ascii=False, compress=True):
"""
Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``).
All *fields* must be defined on the same mesh object; exception will be raised if this is not the case.
:param list of Field fields:
:param fileName: output file name
:param bool ascii: write numbers are ASCII in the XML-based VTU file (rather than base64-encoded binary in XML)
:param bool compress: apply compression to the data
"""
import vtk
if not fields:
raise ValueError('At least one field must be passed.')
# check if all fields are defined on the same mesh
if len(set([f.mesh for f in fields])) != 1:
raise RuntimeError(
'Not all fields are sharing the same Mesh object (and could not be saved to a single .vtu file')
# convert mesh to VTK UnstructuredGrid
mesh = fields[0].getMesh()
vtkgrid = mesh.asVtkUnstructuredGrid()
# add fields as arrays
for f in fields:
arr = vtk.vtkDoubleArray()
arr.SetNumberOfComponents(f.getRecordSize())
arr.SetName(f.getFieldIDName())
assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled
if f.getFieldType() == FieldType.FT_vertexBased:
nn = mesh.getNumberOfVertices()
else:
nn = mesh.getNumberOfCells()
arr.SetNumberOfValues(nn)
for i in range(nn):
arr.SetTuple(i, f.giveValue(i))
if f.getFieldType() == FieldType.FT_vertexBased:
vtkgrid.GetPointData().AddArray(arr)
else:
vtkgrid.GetCellData().AddArray(arr)
# write the unstructured grid to file
writer = vtk.vtkXMLUnstructuredGridWriter()
if compress:
writer.SetCompressor(vtk.vtkZLibDataCompressor())
if ascii:
writer.SetDataModeToAscii()
writer.SetFileName(fileName)
# change between VTK5 and VTK6
        if vtk.vtkVersion().GetVTKMajorVersion() >= 6:
            writer.SetInputData(vtkgrid)
        else:
            writer.SetInput(vtkgrid)
writer.Write()
# finito
@staticmethod
def makeFromVTK3(fileName, units, time=0, forceVersion2=False):
"""
Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or ``.vtp`` with *forceVersion2*); the mesh is shared between fields.
``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not.
        .. note:: Units are not stored in VTK files; all created fields are assigned the *units* value passed by the caller.
:param str fileName: VTK (``*.vtu``) file
:param PhysicalUnit units: units of read values
:param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered)
        :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, instead of ``vtk.vtkXMLGenericDataObjectReader``; this also supposes *fileName* ends with ``.vtk`` (not checked, but may cause an error).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
"""
import vtk
from .dataID import FieldID
# rr=vtk.vtkXMLUnstructuredGridReader()
if forceVersion2 or fileName.endswith('.vtk'):
rr = vtk.vtkGenericDataObjectReader()
else:
rr = vtk.vtkXMLGenericDataObjectReader()
rr.SetFileName(fileName)
rr.Update()
ugrid = rr.GetOutput()
if not isinstance(ugrid, vtk.vtkUnstructuredGrid):
raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % (
fileName, ugrid.__class__.__name__))
# import sys
# sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid)))
# make mesh -- implemented separately
mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid)
# fields which will be returned
ret = []
# get cell and point data
cd, pd = ugrid.GetCellData(), ugrid.GetPointData()
for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for idata in range(data.GetNumberOfArrays()):
                aname, arr = data.GetArrayName(idata), data.GetArray(idata)
nt = arr.GetNumberOfTuples()
if nt == 0:
raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname)
t0 = arr.GetTuple(0)
                valueType = ValueType.fromNumberOfComponents(len(t0))
# this will raise KeyError if fieldID with that name not defined
fid = FieldID[aname]
# get actual values as tuples
values = [arr.GetTuple(t) for t in range(nt)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=units, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def _sum(self, other, sign1, sign2):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to modify the field values.
"""
raise TypeError('Not supported')
def inUnitsOf(self, *units):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to use convertToUnits method
performing in place conversion.
"""
raise TypeError('Not supported')
# def __deepcopy__(self, memo):
    # """ Deepcopy operation modified not to include attributes starting with underscore.
    # These are supposed to be the ones valid only to a specific copy of the receiver.
# An example of these attributes are _PyroURI (injected by Application),
# where _PyroURI contains the URI of specific object, the copy should receive
# its own URI
# """
# cls = self.__class__
# dpcpy = cls.__new__(cls)
#
# memo[id(self)] = dpcpy
# for attr in dir(self):
# if not attr.startswith('_'):
# value = getattr(self, attr)
# setattr(dpcpy, attr, copy.deepcopy(value, memo))
# return dpcpy
| lgpl-3.0 |
ml31415/numpy-groupies | numpy_groupies/benchmarks/simple.py | 1 | 4248 | #!/usr/bin/python -B
# -*- coding: utf-8 -*-
from __future__ import print_function
import timeit
import numpy as np
from numpy_groupies.utils import aliasing
from numpy_groupies import aggregate_py, aggregate_np, aggregate_ufunc
from numpy_groupies.aggregate_pandas import aggregate as aggregate_pd
def aggregate_group_loop(*args, **kwargs):
"""wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops."""
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs)
print("TODO: use more extensive tests")
print("")
print("-----simple examples----------")
test_a = np.array([12.0, 3.2, -15, 88, 12.9])
test_group_idx = np.array([1, 0, 1, 4, 1 ])
print("test_a: ", test_a)
print("test_group_idx: ", test_group_idx)
print("aggregate(test_group_idx, test_a):")
print(aggregate_np(test_group_idx, test_a)) # group vals by idx and sum
# array([3.2, 9.9, 0., 0., 88.])
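# Illustrative cross-check (not part of the original examples): for a plain
# sum, numpy's bincount with weights produces the same grouped result.
print("np.bincount(test_group_idx, weights=test_a):")
print(np.bincount(test_group_idx, weights=test_a))
# array([3.2, 9.9, 0., 0., 88.])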
print("aggregate(test_group_idx, test_a, sz=8, func='min', fill_value=np.nan):")
print(aggregate_np(test_group_idx, test_a, size=8, func='min', fill_value=np.nan))
# array([3.2, -15., nan, 88., nan, nan, nan, nan])
print("aggregate(test_group_idx, test_a, sz=5, func=lambda x: ' + '.join(str(xx) for xx in x),fill_value='')")
print(aggregate_np(test_group_idx, test_a, size=5, func=lambda x: ' + '.join(str(xx) for xx in x), fill_value=''))
print("")
print("---------testing--------------")
print("compare against group-and-loop with numpy")
testable_funcs = {aliasing[f]: f for f in (np.sum, np.prod, np.any, np.all, np.min, np.max, np.std, np.var, np.mean)}
test_group_idx = np.random.randint(0, int(1e3), int(1e5))
test_a = np.random.rand(int(1e5)) * 100 - 50
test_a[test_a > 25] = 0 # for use with bool functions
for name, f in testable_funcs.items():
numpy_loop_group = aggregate_group_loop(test_group_idx, test_a, func=f)
for acc_func, acc_name in [(aggregate_np, 'np-optimised'),
(aggregate_ufunc, 'np-ufunc-at'),
(aggregate_py, 'purepy'),
(aggregate_pd, 'pandas')]:
try:
test_out = acc_func(test_group_idx, test_a, func=name)
test_out = np.asarray(test_out)
if not np.allclose(test_out, numpy_loop_group.astype(test_out.dtype)):
print(name, acc_name, "FAILED test, output: [" + acc_name + "; correct]...")
print(np.vstack((test_out, numpy_loop_group)))
else:
print(name, acc_name, "PASSED test")
except NotImplementedError:
print(name, acc_name, "NOT IMPLEMENTED")
print("")
print("----------benchmarking-------------")
print("Note that the actual observed speedup depends on a variety of properties of the input.")
print("Here we are using 100,000 indices uniformly picked from [0, 1000).")
print("Specifically, about 25% of the values are 0 (for use with bool operations),")
print("the remainder are uniformly distributed on [-50, 25).")
print("Times are scaled to 10 repetitions (actual number of reps used may not be 10).")
print(''.join(['function'.rjust(8), 'pure-py'.rjust(14), 'np-grouploop'.rjust(14),
'np-ufuncat'.rjust(14), 'np-optimised'.rjust(14), 'pandas'.rjust(14),
'ratio'.rjust(15)]))
for name, f in testable_funcs.items():
print(name.rjust(8), end='')
times = [None] * 5
for ii, acc_func in enumerate([aggregate_py, aggregate_group_loop,
aggregate_ufunc, aggregate_np,
aggregate_pd]):
try:
func = f if acc_func is aggregate_group_loop else name
reps = 3 if acc_func is aggregate_py else 20
times[ii] = timeit.Timer(lambda: acc_func(test_group_idx, test_a, func=func)).timeit(number=reps) / reps * 10
print(("%.1fms" % ((times[ii] * 1000))).rjust(13), end='')
except NotImplementedError:
print("no-impl".rjust(13), end='')
denom = min(t for t in times if t is not None)
ratios = [("-".center(4) if t is None else str(round(t / denom, 1))).center(5) for t in times]
print(" ", (":".join(ratios)))
| bsd-2-clause |
jseabold/scikit-learn | sklearn/feature_selection/rfe.py | 4 | 15662 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 most informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # in the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features in
    the Friedman #1 dataset, whose identity is not known a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv.split(X, y)):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Normalize the summed scores by the number of CV splits. Note that the
# loop counter 'n' ends at get_n_splits(X, y) - 1, so get_n_splits(X, y)
# is the correct divisor.
self.grid_scores_ = scores / cv.get_n_splits(X, y)
return self
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 11810 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2  # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
if hasattr(pca, 'random_state'):
pca.random_state = rng
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=4)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check explained_variance_ of PCA and RandomizedPCA against each other
# and against the empirical variance of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check explained_variance_ of PCA and RandomizedPCA against each other
# and against the empirical variance of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e351.py | 2 | 6885 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # pool size: number of consecutive time steps pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # pool size: number of consecutive time steps pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
justrypython/EAST | svm_model_v2.py | 1 | 2801 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
# calculate the polygon area (shoelace formula)
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
return zip(p, np.concatenate((p[1:], [p[0]])))
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
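# Express box0's first corner in a local frame attached to one edge of the
# reference box box1: project it onto that edge and its normal, then divide by
# the edge length so the resulting (x, y) feature is scale-invariant.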
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis
if __name__ == '__main__':
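# Read the labelled boxes from the annotation txt files, pair every box with
# the class-7 reference box from the same file, convert each pair into the
# normalised (x, y) feature above, then either evaluate a pickled classifier
# (test=True) or train and save a new SVC.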
test = True
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((800000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*10:cnt*10+len(lines)] = lines
cnt += 1
zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
zeros_labels = zeros.all(axis=1)
zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==7]
idboxes = np.tile(idboxes[:, :8], (1, 10))
idboxes = idboxes.reshape((-1, 8))
boxes = np.delete(boxes, zeros_labels[0], axis=0)
idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_v2.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 |
ThomasSweijen/TPF | examples/adaptiveintegrator/simple-scene-plot-NewtonIntegrator.py | 6 | 2027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=0.0,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### You want snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
### whichever comes sooner. virtTimeLim attribute is unset, hence virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on the left y-axis (None in the tuple separates the left and right y-axes) and z_sph, v_sph (as green circles connected with line) and z_sph_half, all as functions of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
woodscn/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except:
from sympy import mpmath
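# Relative error |a - b| / (atol + |b|); entries where both values are the
# same infinity are treated as an exact match (error 0).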
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit |
andyh616/mne-python | mne/tests/test_epochs.py | 1 | 71695 | # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry() # really clean warning stack
def test_reject():
"""Test epochs rejection
"""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad_epochs(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad_epochs,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad_epochs(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
def test_decim():
"""Test epochs decimation
"""
# First with EpochsArray
n_epochs, n_channels, n_times = 5, 10, 20
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
sfreq = 1000.
sfreq_new = sfreq / decim
data = np.random.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.decimate(decim, copy=True).get_data()
data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs, data_epochs_2)
# Now let's do it with some real data
raw, events, picks = _get_data()
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=False)
assert_raises(ValueError, epochs.decimate, -1)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload)
epochs.preload_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
# events with non integers
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
epochs_no_bl.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_no_bl_read.apply_baseline(baseline)
assert_true(epochs_no_bl_read.baseline == baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(), epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax, picks=picks,
on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.preload_data()
epochs.crop(0, 0, copy=False)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_equal(len(epochs_read.times), 1)
    assert_equal(epochs_read.get_data().shape[-1], 1)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparision_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
    # define a cropping window inside the original time range
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample back by 2, which should match the original data
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
assert_true(epochs_resampled is epochs)
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
    assert_equal(caught, 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
assert_true(df.index.names == ind if isinstance(ind, list) else [ind])
        # test that non-indexed data are present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection
"""
raw, events, picks = _get_data()
events = events[:10]
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for preload in (True, False):
for proj in (True, False, 'delayed'):
for decim in (1, 3):
for ii in range(2):
epochs = Epochs(raw, events, event_id, tmin, tmax,
picks=picks, proj=proj, reject=reject,
preload=preload, decim=decim)
if ii == 1:
epochs.preload_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
if proj is True:
evoked.apply_proj()
epochs_data = epochs.get_data().mean(axis=0)[picks_data]
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
assert_allclose(evoked.data, epochs_data,
rtol=1e-5, atol=1e-15)
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
            # With preload you cannot know the bads for already-ignored epochs
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # Without preloading it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info['ch_names'].pop(0)
epochs_meg.info['nchan'] -= 1
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['expimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0., baseline=None)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
tmin=-0.2, baseline=(None, 0))
def test_concatenate_epochs():
"""Test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types
"""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
epoch_meg = epoch.pick_types(meg=True, copy=True)
epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
    assert_true(all(ch in epoch_new.ch_names
                    for ch in epoch_eeg_meg.ch_names))
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
run_tests_if_main()
| bsd-3-clause |
remenska/rootpy | rootpy/plotting/contrib/plot_corrcoef_matrix.py | 5 | 12192 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from ...extern.six.moves import range
from ...extern.six import string_types
__all__ = [
'plot_corrcoef_matrix',
'corrcoef',
'cov',
]
def plot_corrcoef_matrix(matrix, names=None,
cmap=None, cmap_text=None,
fontsize=12, grid=False,
axes=None):
"""
This function will draw a lower-triangular correlation matrix
Parameters
----------
matrix : 2-dimensional numpy array/matrix
A correlation coefficient matrix
names : list of strings, optional (default=None)
List of the parameter names corresponding to the rows in ``matrix``.
cmap : matplotlib color map, optional (default=None)
Color map used to color the matrix cells.
cmap_text : matplotlib color map, optional (default=None)
Color map used to color the cell value text. If None, then
all values will be black.
fontsize : int, optional (default=12)
Font size of parameter name and correlation value text.
grid : bool, optional (default=False)
If True, then draw dashed grid lines around the matrix elements.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
Notes
-----
NumPy and matplotlib are required
Examples
--------
>>> matrix = corrcoef(data.T, weights=weights)
>>> plot_corrcoef_matrix(matrix, names)
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
if axes is None:
axes = plt.gca()
matrix = np.asarray(matrix)
if matrix.ndim != 2:
raise ValueError("matrix is not a 2-dimensional array or matrix")
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix is not square")
if names is not None and len(names) != matrix.shape[0]:
raise ValueError("the number of names does not match the number of "
"rows/columns in the matrix")
# mask out the upper triangular matrix
matrix[np.triu_indices(matrix.shape[0])] = np.nan
if isinstance(cmap_text, string_types):
cmap_text = cm.get_cmap(cmap_text, 201)
if cmap is None:
cmap = cm.get_cmap('jet', 201)
elif isinstance(cmap, string_types):
cmap = cm.get_cmap(cmap, 201)
# make NaN pixels white
cmap.set_bad('w')
axes.imshow(matrix, interpolation='nearest',
cmap=cmap, origin='upper',
vmin=-1, vmax=1)
axes.set_frame_on(False)
plt.setp(axes.get_yticklabels(), visible=False)
plt.setp(axes.get_yticklines(), visible=False)
plt.setp(axes.get_xticklabels(), visible=False)
plt.setp(axes.get_xticklines(), visible=False)
if grid:
# draw grid lines
for slot in range(1, matrix.shape[0] - 1):
# vertical
axes.plot((slot - 0.5, slot - 0.5),
(slot - 0.5, matrix.shape[0] - 0.5), 'k:', linewidth=1)
# horizontal
axes.plot((-0.5, slot + 0.5),
(slot + 0.5, slot + 0.5), 'k:', linewidth=1)
if names is not None:
for slot in range(1, matrix.shape[0]):
# diagonal
axes.plot((slot - 0.5, slot + 1.5),
(slot - 0.5, slot - 2.5), 'k:', linewidth=1)
# label cell values
for row, col in zip(*np.tril_indices(matrix.shape[0], k=-1)):
value = matrix[row][col]
if cmap_text is not None:
color = cmap_text((value + 1.) / 2.)
else:
color = 'black'
axes.text(
col, row,
"{0:d}%".format(int(value * 100)),
color=color,
ha='center', va='center',
fontsize=fontsize)
if names is not None:
# write parameter names
for i, name in enumerate(names):
axes.annotate(
name, (i, i),
rotation=45,
ha='left', va='bottom',
transform=axes.transData,
fontsize=fontsize)
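# A minimal, self-contained usage sketch of the plotting helper above (an
# illustrative addition, not part of the rootpy API): it complements the
# docstring example with concrete data, building a weighted correlation
# matrix from random observations and drawing it. All names and values
# below are made up for the example.
def _plot_corrcoef_matrix_sketch():
    """Draw the weighted correlation matrix of four random variables."""
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.randn(200, 4)      # 200 observations of 4 variables
    weights = rng.uniform(0.5, 1.5, size=200)      # per-observation weights
    names = ['var_a', 'var_b', 'var_c', 'var_d']
    matrix = corrcoef(data.T, weights=weights)
    plot_corrcoef_matrix(matrix, names=names, grid=True)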
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None, repeat_weights=0):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
    >>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
    >>> print(np.cov(x))
11.71
"""
import numpy as np
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
X = np.array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None), np.newaxis)
else:
axis = 1
tup = (np.newaxis, slice(None))
if y is not None:
y = np.array(y, copy=False, ndmin=2, dtype=float)
X = np.concatenate((X, y), axis)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if weights is not None:
weights = np.array(weights, dtype=float)
weights_sum = weights.sum()
if weights_sum <= 0:
raise ValueError(
"sum of weights is non-positive")
X -= np.average(X, axis=1-axis, weights=weights)[tup]
if repeat_weights:
# each weight represents a number of repetitions of an observation
# the total sample size can be determined in this case and we have
# both an unbiased and biased weighted covariance
fact = weights_sum - ddof
else:
# normalize weights so they sum to unity
weights /= weights_sum
# unbiased weighted covariance is not defined if the weights are
# not integral frequencies (repeat-type)
fact = (1. - np.power(weights, 2).sum())
else:
weights = 1
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
fact = float(N - ddof)
if not rowvar:
return (np.dot(weights * X.T, X.conj()) / fact).squeeze()
else:
return (np.dot(weights * X, X.T.conj()) / fact).squeeze()
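# A short illustrative check of the ``repeat_weights`` behaviour documented
# above (an added sketch, not part of the rootpy API): with integer weights
# and ``repeat_weights=1`` the result matches the unbiased covariance of the
# explicitly repeated observations.
def _repeat_weights_sketch():
    """Compare cov(..., repeat_weights=1) with explicitly repeated data."""
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.randn(3, 5)                # 3 variables, 5 observations
    repeats = np.array([1, 2, 1, 3, 1])   # integer occurrence counts
    repeated = np.repeat(data, repeats, axis=1)
    expected = np.cov(repeated)           # plain unbiased covariance
    got = cov(data, weights=repeats, repeat_weights=1)
    assert np.allclose(expected, got)
    return got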
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None, weights=None,
repeat_weights=0):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
import numpy as np
c = cov(x, y, rowvar, bias, ddof, weights, repeat_weights)
if c.size == 0:
# handle empty arrays
return c
try:
d = np.diag(c)
except ValueError: # scalar covariance
return 1
return c / np.sqrt(np.multiply.outer(d, d))
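# A short illustrative check of the relation documented above (an added
# sketch, not part of the rootpy API): the correlation matrix is the
# covariance matrix normalized by the outer product of the standard
# deviations, so its diagonal is identically one.
def _corrcoef_vs_cov_sketch():
    """Compare corrcoef with an explicitly normalized covariance matrix."""
    import numpy as np
    rng = np.random.RandomState(1)
    data = rng.randn(4, 50)               # 4 variables, 50 observations
    c = cov(data)
    p = corrcoef(data)
    std = np.sqrt(np.diag(c))
    assert np.allclose(p, c / np.outer(std, std))
    assert np.allclose(np.diag(p), 1.0)
    return p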
| gpl-3.0 |
CVML/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
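# Quick illustrative check of the helper above (an added sketch, not part of
# the original example): for an orthogonal design the irrelevant columns
# project to zero on the relevant ones, so the mutual incoherence is exactly
# zero, while strongly correlated designs drive it towards (and beyond) one,
# where L1-recovery guarantees break down.
_eye = np.eye(6)
assert np.isclose(mutual_incoherence(_eye[:, :3], _eye[:, 3:]), 0.)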
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
    # The correlation of our design: variables correlated by blocks of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Suppress the user warnings - they are not necessary for the example
    # as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
    # Plot only the first 100 coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
sumspr/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
    # We first create an estimator, give it its own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
wenhuchen/ETHZ-Bootstrapped-Captioning | visual-concepts/coco/PythonAPI/pycocotools/coco.py | 1 | 16953 | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions, not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
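# A minimal usage sketch (illustrative only; the annotation path and the
# category name below are placeholders):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds))
#   coco.showAnns(anns)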
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import urllib
import copy
import itertools
import mask
import os
from collections import defaultdict
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns,cats,imgs = dict(),dict(),dict()
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception("datasetType not supported")
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print 'Loading and preparing results... '
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
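        Example (illustrative values): a row [42, 10., 20., 50., 80., 0.9, 3]
        becomes {'image_id': 42, 'bbox': [10., 20., 50., 80.], 'score': 0.9,
        'category_id': 3}.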
"""
print("Converting ndarray to lists...")
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print("%d/%d" % (i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
stephenliu1989/HK_DataMiner | hkdataminer/cluster/faiss_dbscan_.py | 1 | 14197 | # -*- coding: utf-8 -*-
"""
DBSCAN Accelerated by Facebook AI Faiss
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
import time
from scipy import sparse
from numba import autojit
import numba
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array, check_consistent_length
#from sklearn.neighbors import NearestNeighbors
from sklearn.cluster._dbscan_inner import dbscan_inner
import faiss
@autojit
def get_neighborhoods(D, I, eps):
neighborhoods = []
for i in range(len(D)):
distances = D[i]
#print(distances)
distances = np.delete(distances, 0)
indices = I[i]
indices = np.delete(indices, 0)
#print(indices)
index = indices[distances <= eps]
neighborhoods.append(index)
#neighborhoods = np.asarray(neighborhoods)
#np.savetxt('faiss_neighborhoods', np.asarray(neighborhoods), fmt='%s')
return np.asarray(neighborhoods)
def cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index_cpu.is_trained
index_cpu.train(X)
assert index_cpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
index_cpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_cpu.search(X[samples], k) # sanity check
while np.min(np.amax(D, axis=1)) < eps:
k = k * 2
# D, I = index_gpu.search(X[samples], k)
#print(np.min(np.amax(D, axis=1)), eps, k)
D, I = index_cpu.search(X[samples], k)
if k > 1024:
k = 1000
#print(np.max(D[:, k - 1]), k, eps)
index_cpu.nprobe = nprobe
D, I = index_cpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
res = faiss.StandardGpuResources() # use a single GPU
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
# make it an IVF GPU index
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
assert not index_gpu.is_trained
index_gpu.train(X)
assert index_gpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
index_gpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_gpu.search(X[samples], k) # sanity check
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_gpu.search(X[samples], k)
#print(np.max(D[:, k - 1]), k, eps)
index_gpu.nprobe = nprobe
D, I = index_gpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def faiss_dbscan(X, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1, GPU=False, IVFFlat=True):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
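    Examples
    --------
    A minimal sketch (illustrative parameter values; assumes the ``faiss``
    and ``numba`` packages are installed and uses the exact CPU index)::
        X = np.random.random((1000, 8)).astype('float32')
        core_samples, labels = faiss_dbscan(X, eps=0.3, min_samples=10,
                                            GPU=False, IVFFlat=False)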
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it is useless information.)
if GPU is True:
neighborhoods = gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
else:
neighborhoods = cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class Faiss_DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
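    Examples
    --------
    A sketch of the estimator interface (illustrative values; assumes
    ``faiss`` is installed)::
        X = np.random.random((1000, 8)).astype('float32')
        db = Faiss_DBSCAN(eps=0.3, min_samples=10, GPU=False,
                          IVFFlat=False).fit(X)
        labels = db.labels_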
"""
def __init__(self, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', n_jobs=1, GPU=False, IVFFlat=True):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.n_jobs = n_jobs
self.GPU = GPU
self.IVFFlat = IVFFlat
self.nlist = nlist
self.nprobe = nprobe
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
#if metric is not "rmsd":
# X = check_array(X, accept_sparse='csr')
#t0 = time.time()
clust = faiss_dbscan(X, eps=self.eps, min_samples=self.min_samples, nlist=self.nlist, nprobe=self.nprobe, sample_weight=sample_weight, GPU=self.GPU, IVFFlat=self.IVFFlat)
#t1 = time.time()
#print("Faiss DBSCAN clustering Time Cost:", t1 - t0)
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
| apache-2.0 |
rexshihaoren/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/computation/eval.py | 7 | 10856 | #!/usr/bin/env python
"""Top level ``eval`` module.
"""
import warnings
import tokenize
from pandas.io.formats.printing import pprint_thing
from pandas.core.computation import _NUMEXPR_INSTALLED
from pandas.core.computation.expr import Expr, _parsers, tokenize_string
from pandas.core.computation.scope import _ensure_scope
from pandas.compat import string_types
from pandas.core.computation.engines import _engines
from pandas.util._validators import validate_bool_kwarg
def _check_engine(engine):
"""Make sure a valid engine is passed.
Parameters
----------
engine : str
Raises
------
KeyError
* If an invalid engine is passed
ImportError
* If numexpr was requested but doesn't exist
Returns
-------
string engine
"""
if engine is None:
if _NUMEXPR_INSTALLED:
engine = 'numexpr'
else:
engine = 'python'
if engine not in _engines:
raise KeyError('Invalid engine {0!r} passed, valid engines are'
' {1}'.format(engine, list(_engines.keys())))
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == 'numexpr':
if not _NUMEXPR_INSTALLED:
raise ImportError("'numexpr' is not installed or an "
"unsupported version. Cannot use "
"engine='numexpr' for query/eval "
"if 'numexpr' is not installed")
return engine
def _check_parser(parser):
"""Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
if parser not in _parsers:
raise KeyError('Invalid parser {0!r} passed, valid parsers are'
' {1}'.format(parser, _parsers.keys()))
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, '__getitem__'):
name = type(resolver).__name__
raise TypeError('Resolver of type %r does not implement '
'the __getitem__ method' % name)
def _check_expression(expr):
"""Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
"""Convert an object to an expression.
    This function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
s : unicode
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = pprint_thing(expr)
_check_expression(s)
return s
def _check_for_locals(expr, stack_level, parser):
at_top_of_stack = stack_level == 0
not_pandas_parser = parser != 'pandas'
if not_pandas_parser:
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = ("The '@' prefix is not allowed in "
"top-level eval calls, \nplease refer to "
"your variables by name without the '@' "
"prefix")
if at_top_of_stack or not_pandas_parser:
for toknum, tokval in tokenize_string(expr):
if toknum == tokenize.OP and tokval == '@':
raise SyntaxError(msg)
def eval(expr, parser='pandas', engine=None, truediv=True,
local_dict=None, global_dict=None, resolvers=(), level=0,
target=None, inplace=None):
"""Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
        default of ``'pandas'`` parses code slightly differently from standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string or None, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~pandas.DataFrame.query` method to inject the
:attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : a target object for assignment, optional, default is None
essentially this is a passed in resolver
inplace : bool, default True
If expression mutates, whether to modify object inplace or return
copy with mutation.
        WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Notes
-----
The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.eval
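    Examples
    --------
    A small illustration (the frame below is made up for this example)::
        >>> import pandas as pd
        >>> pd.eval("2 + 3 * 4")
        14
        >>> df = pd.DataFrame({'a': [1, 2], 'b': [10, 20]})
        >>> pd.eval("df.a + df.b")
        0    11
        1    22
        dtype: int64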
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
first_expr = True
if isinstance(expr, string_types):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != '']
else:
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError("multi-line expressions are only valid in the "
"context of data, use DataFrame.eval")
first_expr = True
for expr in exprs:
expr = _convert_expression(expr)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = _ensure_scope(level + 1, global_dict=global_dict,
local_dict=local_dict, resolvers=resolvers,
target=target)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None and multi_line:
raise ValueError("Multi-line expressions are only valid"
" if all expressions contain an assignment")
# assign if needed
if env.target is not None and parsed_expr.assigner is not None:
if inplace is None:
                warnings.warn(
                    "eval expressions containing an assignment currently "
                    "default to operating inplace.\nThis will change in "
                    "a future version of pandas, use inplace=True to "
                    "avoid this warning.",
                    FutureWarning, stacklevel=3)
inplace = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
target = env.target.copy()
else:
target = env.target
target[parsed_expr.assigner] = ret
if not resolvers:
resolvers = ({parsed_expr.assigner: ret},)
else:
# existing resolver needs updated to handle
# case of mutating existing column in copy
for resolver in resolvers:
if parsed_expr.assigner in resolver:
resolver[parsed_expr.assigner] = ret
break
else:
resolvers += ({parsed_expr.assigner: ret},)
ret = None
first_expr = False
if not inplace and inplace is not None:
return target
return ret
| mit |
RayMick/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using a 330-point subset, but only 30 of those points will
be labeled. Results in the form of a confusion matrix and a series of
metrics over each class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
ARUNSOORAJPS/flipkart_gridlock | src/main.py | 1 | 2686 | # -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2017-07-08 00:32:09
# @Last Modified by: chandan
# @Last Modified time: 2017-07-08 11:13:46
from data_utils import read_file
from config import DATA_DIR, SCORE_COLUMNS
import os
from model import train_model, test_model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os.path as osp
ACC_FILE = 'RAW_ACCELEROMETERS.txt'
GPS_FILE = 'RAW_GPS.txt'
VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt'
SCORE_FILE = 'SEMANTIC_ONLINE.txt'
def main():
# read acc, gps, veh det for multiple drivers, scenes
X_dfs, Y_dfs = [], []
driver_dir = 'D1'
for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)):
drive_path = osp.join(DATA_DIR, driver_dir, drive_dir)
print drive_path
acc = read_file(osp.join(drive_path, ACC_FILE))
gps = read_file(osp.join(drive_path, GPS_FILE))
veh = read_file(osp.join(drive_path, VEHDET_FILE))
score = read_file(osp.join(drive_path, SCORE_FILE))
datasets = [acc, gps, veh, score]
n_rows = min(map(len, datasets))
# sample high frequency data to lowest frequency
for i in range(len(datasets)):
# drop time column
datasets[i].drop(0, 1, inplace=True)
if len(datasets[i]) > n_rows:
step = len(datasets[i]) / n_rows
ndx = xrange(0, n_rows * step, step)
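                # Illustrative: with 1000 rows sampled down to n_rows == 100,
                # step == 10 and every 10th row is kept.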
datasets[i] = datasets[i].ix[ndx]
datasets[i] = datasets[i].reset_index(drop=True)
score_df = datasets[-1]
datasets = datasets[:-1]
Y_df = score.ix[:, SCORE_COLUMNS]
# create dataset
X_df = pd.concat(datasets, axis=1, ignore_index=True)
X_df.fillna(0, inplace=True)
print "X:", X_df.shape
print "Y:", score_df.shape
X_dfs.append(X_df)
Y_dfs.append(Y_df)
# preprocess
X_df = pd.concat(X_dfs, ignore_index=True)
X = X_df.values.astype('float32')
Y = pd.concat(Y_dfs, ignore_index=True).values
print "X shape:", X.shape
print "Y shape:", Y.shape
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2)
# train
print "X Train shape:", X_tr.shape
print "Y Train shape:", Y_tr.shape
print "X test shape:", X_ts.shape
print "Y test shape:", Y_ts.shape
seq_len = 16
    X_tr_seq = X_to_seq(X_tr, seq_len, 1)
Y_tr = Y_tr[seq_len:]
X_ts_seq = X_to_seq(X_ts, seq_len, 1)
Y_ts = Y_ts[seq_len:]
#train_model(X_tr, Y_tr)
loss = test_model(X_ts_seq, Y_ts)
print loss
def X_to_seq(X, seq_len=16, stride=1):
X_seqs = []
for start_ndx in range(0, len(X) - seq_len, stride):
X_seqs.append(X[start_ndx : start_ndx + seq_len])
return np.array(X_seqs)
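# Illustrative shape check (not part of the original pipeline): with stride=1,
# X_to_seq turns an (N, F) array into (N - seq_len, seq_len, F), e.g.
# X_to_seq(np.zeros((100, 8)), seq_len=16, stride=1).shape == (84, 16, 8).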
if __name__ == '__main__':
main() | mit |
gotomypc/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
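# For the one-vs-all classifier of class c, the decision boundary satisfies
# coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0, so the plotted line is
# x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1].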
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
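# The contour at level 0.0 is the decision boundary; the levels at -1.0 and
# +1.0 trace the margins of the hinge-loss (linear SVM style) classifier.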
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
SamProtas/PALiquor | geocode_fixes.py | 1 | 2733 | import os
import pandas as pd
import numpy as np
import sqlite3
import requests
import time
def fix_location(lid, new_address):
pd.set_option('display.mpl_style', 'default')
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE1 = os.path.join(PROJECT_ROOT, 'dbs', 'licensees.db')
conn1 = sqlite3.connect(DATABASE1)
c = conn1.cursor()
c.execute('SELECT address, latitude, longitude FROM licensees WHERE lid = ?',[lid])
old_info = c.fetchone()
old_latitude = old_info[1]
old_longitude = old_info[2]
if old_latitude or old_longitude:
return 'No need to fix. Aborting geocode call.'
api_key = 'NOT MY REAL KEY!!!!!'
baseurl = 'https://maps.googleapis.com/maps/api/geocode/json?key='+api_key+'&address='
fullurl = baseurl + new_address
page = requests.get(fullurl)
latitude = page.json()['results'][0]['geometry']['location']['lat']
longitude = page.json()['results'][0]['geometry']['location']['lng']
c.execute('UPDATE licensees SET address = ?, latitude = ?, longitude = ? WHERE lid = ?',[new_address, latitude, longitude, lid])
conn1.commit()
c.close()
return 'Good Fix'
# Manually fixed addresses
fix_location(233,'US Customs House Chestnut Street Philadelphia PA')
time.sleep(.2)
fix_location(43444, '431 South Streeet Philadelphia PA')
time.sleep(.2)
fix_location(45162, '2457 Grant Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(69585, '2400 Strawberry Mansion Drive Philadelphia, PA 19132')
time.sleep(.2)
fix_location(44218, 'Chickie and Petes Roosevelt Boulevard, Philadelphia, PA 19116')
time.sleep(.2)
fix_location(48788, 'Diamond Club at Mitten Hall 1913 North Broad Street Philadelphia, PA 19122')
time.sleep(.2)
fix_location(64349, '51 North 12th Street Philadelphia, PA 19107')
time.sleep(.2)
fix_location(64754, '1420 Locust Street Philadelphia PA 19102')
time.sleep(.2)
fix_location(50302, '39 Snyder Ave Philadelphia PA 19148')
time.sleep(.2)
fix_location(61215, '9910 Frankford Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(65590, '11000 E Roosevelt BLVD Philadelphia PA')
time.sleep(.2)
fix_location(26715, 'Knights Road Shopping Center 4018 Woodhaven Road Philadelphia, PA 19154')
time.sleep(.2)
fix_location(66741, '9183 Roosevelt BLVD Philadelphia PA 19114')
time.sleep(.2)
fix_location(65221, '129 S 30th St Philadelphia PA 19104')
time.sleep(.2)
fix_location(23775, 'The Bellevue Philadelphia PA 19103')
time.sleep(.2)
fix_location(55796, '5765 Wister St Philadelphia PA 19138')
time.sleep(.2)
fix_location(25469, 'Market East Philadelphia PA 19107')
time.sleep(.2)
fix_location(1140, 'torresdale and decatour, philadelphia pa')
| gpl-2.0 |
pythonvietnam/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
    # a large number of trials prevents false negatives without slowing the
    # normal case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
    # number of permutations. However, it works with sampling algorithms
    # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
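        # e.g. combinations(10, 3, exact=True) == 120 possible subsets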
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
skggm/skggm | examples/trace_plot_example.py | 1 | 3138 | """
Visualize Regularization Path
=============================
Plot the edge level coefficients (inverse covariance entries)
as a function of the regularization parameter.
"""
import sys
import numpy as np
from sklearn.datasets import make_sparse_spd_matrix
sys.path.append("..")
from inverse_covariance import QuicGraphicalLasso
from inverse_covariance.plot_util import trace_plot
from inverse_covariance.profiling import LatticeGraph
def make_data(n_samples, n_features):
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng
)
cov = np.linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
return X, cov, prec
def make_data_banded(n_samples, n_features):
alpha = 0.1
cov, prec, adj = LatticeGraph(
n_blocks=2, random_sign=True, chain_blocks=True, seed=1
).create(n_features, alpha)
prng = np.random.RandomState(2)
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
return X, cov, prec
def show_quic_coefficient_trace(X):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=20)
def show_quic_coefficient_trace_truth(X, truth):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=6, ground_truth=truth)
if __name__ == "__main__":
# example 1
n_samples = 10
n_features = 5
X, cov, prec = make_data(n_samples, n_features)
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=10")
print(" n_features=5")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=100")
print(" n_features=5")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
# example 2
n_samples = 110
n_features = 100
X, cov, prec = make_data_banded(n_samples, n_features)
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
| mit |
trungnt13/scikit-learn | sklearn/datasets/lfw.py | 38 | 19042 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
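    Examples
    --------
    A minimal usage sketch; the ``min_faces_per_person`` value is illustrative
    and the shapes assume the default ``slice_`` and ``resize`` arguments::
        lfw_people = fetch_lfw_people(min_faces_per_person=70)
        print(lfw_people.data.shape)    # (n_faces_kept, 2914)
        print(lfw_people.images.shape)  # (n_faces_kept, 62, 47)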
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
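    Examples
    --------
    A minimal usage sketch; the shapes assume the 'train' subset and the
    default ``slice_`` and ``resize`` arguments::
        lfw_pairs_train = fetch_lfw_pairs(subset='train')
        print(lfw_pairs_train.pairs.shape)   # (2200, 2, 62, 47)
        print(lfw_pairs_train.target.shape)  # (2200,)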
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
mjvakili/ccppabc | ccppabc/code/archive/wp_covariance.py | 1 | 1717 | from halotools.empirical_models import Zheng07 , model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np
model = Zheng07()
xir = []
for i in range(500):
model.populate_mock()
xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
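# np.cov treats each row as a variable, so the (n_mocks, n_bins) stack of
# correlation-function realizations is transposed to obtain an
# (n_bins, n_bins) covariance matrix across radial bins.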
np.savetxt("clustering_covariance_Mr20.dat" , covar)
"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a
rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.
cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)
pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
key1='x', key2='y', key3='z', mask=mask,
return_complement=False)
figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')
for n_pbins in np.array([2,8,16]):
p_bins = np.linspace(0 , l/2 , n_pbins)
a = time.time()
clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
print time.time() - a
plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)
plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
| mit |
vtsuperdarn/davitpy | davitpy/pydarn/proc/music/music.py | 2 | 85275 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
will be searched for attributes which contain the string specified in the dataSet keyword. If more than one are
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
----------
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
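    Example
    -------
    A hypothetical lookup by partial dataSet name (the 'limitsApplied'
    substring is illustrative); the last matching attribute is returned::
        currentData = getDataSet(dataObj, 'limitsApplied')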
Written by Nathaniel A. Frissell, Fall 2013
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
from strongest signal to weakest, as determined by the MUSIC algorithm.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
string_info = []
if sort_key is not None:
orders = [x[sort_key] for x in signal_list]
orders.sort()
for order in orders:
for sig in signal_list:
if sig[sort_key] == order:
string_info.append(stringify_signal(sig))
signal_list.remove(sig)
else:
for sig in signal_list:
string_info.append(stringify_signal(sig))
return string_info
class SigDetect(object):
"""Class to hold information about detected signals.
Methods
-------
string
reorder
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self):
pass
def string(self):
"""Method to convert a list of signal dictionaries into strings.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
return stringify_signal_list(self.info)
def reorder(self):
"""Method to sort items in .info by signal maximum value (from the scaled kArr) and update nrSignals.
Written by Nathaniel A. Frissell, Fall 2013
"""
#Do the sorting...
from operator import itemgetter
newlist = sorted(self.info,key=itemgetter('max'),reverse=True)
#Put in the order numbers...
order = 1
for item in newlist:
item['order'] = order
order = order + 1
#Save the list to the dataObj...
self.info = newlist
#Update the nrSigs
self.nrSigs = len(newlist)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplot lib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplot lib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
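        Example
        -------
        A hypothetical call (the new name and comment strings are illustrative)::
            newSigObj = dataObj.active.copy('detrended', 'Linear detrend applied')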
Written by Nathaniel A. Frissell, Fall 2013
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
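        Example
        -------
        For an illustrative scan cadence of 120 s (2-minute scans), the
        Nyquist frequency is 1 / (2 * 120 s) ~= 0.00417 Hz (about 4.2 mHz).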
Written by Nathaniel A. Frissell, Fall 2013
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
            Sample period of the signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
return samplePeriod
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
rangeLimits : Optional[interable]
            Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
            Two-element array defining the maximum and minimum gates to use.
timeLimits : Optional[]
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in it's parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
Metadata affects various plotting parameters and signal processing routinges.
Parameters
----------
**metadata :
keywords sent to matplot lib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
Infomation to add to history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
GS : Ground Scatter Mapping Model. See Bristow et al. [1994] (default)
IS : Standard SuperDARN scatter mapping model.
S : Standard projection model
E1 : for Chisham E-region 1/2-hop ionospheric projection model
F1 : for Chisham F-region 1/2-hop ionospheric projection model
F3 : for Chisham F-region 1 1/2-hop ionospheric projection model
C : Chisham projection model
None : if you trust your elevation or altitude values
fovCoords : Optional[str]
Map coordinate system. WARNING: 'geo' is curently only tested coordinate system.
full_array : Optional[bool]
If True, make the data array the full beam, gate dimensions listed in the hdw.dat file.
If False, truncate the array to the maximum dimensions that there is actually data.
False will save space without throwing out any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
prm :
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = int(np.max(dataListArray[:,scanInx]) + 1)
if full_array:
nrBeams = int(fov.beams.max() + 1)
nrGates = int(fov.gates.max() + 1)
else:
nrBeams = int(np.max(dataListArray[:,beamInx]) + 1)
nrGates = int(np.max(dataListArray[:,gateInx]) + 1)
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[int(dataListArray[inx,scanInx]),int(dataListArray[inx,beamInx]),int(dataListArray[inx,gateInx])] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
"""Return a sorted list of musicDataObj's contained in this musicArray.
Returns
-------
dataSets : list of str
Names of musicDataObj's contained in this musicArray.
Written by Nathaniel A. Frissell, Fall 2013
"""
attrs = dir(self)
dataSets = []
for item in attrs:
if item.startswith('DS'):
dataSets.append(item)
dataSets.sort()
return dataSets
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
"""Interpolates the data in a musicArray object along the beams of the radar. This method will ensure that no
rangegates are missing data. Ranges outside of metadata['gateLimits'] will be set to 0.
The result is stored as a new musicDataObj in the given musicArray object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
currentData = getDataSet(dataObj,dataSet)
nrTimes = len(currentData.time)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for tt in range(nrTimes):
for bb in range(nrBeams):
rangeVec = currentData.fov.slantRCenter[bb,:]
input_x = copy.copy(rangeVec)
input_y = currentData.data[tt,bb,:]
#If metadata['gateLimits'], select only those measurements...
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]
if len(gateInx) < 2: continue
input_x = input_x[gateInx]
input_y = input_y[gateInx]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
interpArr[tt,bb,:] = intFn(rangeVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = interpArr
newDataSet.setActive()
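# Usage sketch (illustrative, not part of the original module): `dataObj` is
# assumed to be a musicArray instance already loaded with fit data.
#
# beamInterpolation(dataObj, dataSet='active')
# interped = dataObj.active  # the new 'beamInterpolated' data set is now active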
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
"""Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
it does not create a new data set or alter the data in any way. If you specify rangeLimits, they will be changed to correspond
with the center value of the range cell. Gate limits always override range limits.
Use the applyLimits() method to remove data outside of the data limits.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minimum gates to use.
beamLimits : Optional[iterable]
Two-element array defining the maximum and minimum beams to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
try:
if (rangeLimits != None) or (gateLimits != None):
if (rangeLimits != None) and (gateLimits == None):
inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]
if gateLimits != None:
rangeMin = np.int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
rangeMax = np.int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
rangeLimits = [rangeMin,rangeMax]
currentData.metadata['gateLimits'] = gateLimits
currentData.metadata['rangeLimits'] = rangeLimits
if beamLimits != None:
currentData.metadata['beamLimits'] = beamLimits
if timeLimits != None:
currentData.metadata['timeLimits'] = timeLimits
except:
logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
"""Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
for a specified length of time.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
max_off_time : Optional[int/float]
Maximum length in minutes radar may remain off.
sTime : Optional[datetime.datetime]
Starting time of checking period. If None, min(currentData.time) is used.
eTime : Optional[datetime.datetime]
End time of checking period. If None, max(currentData.time) is used.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
if sTime is None:
sTime = np.min(currentData.time)
if eTime is None:
eTime = np.max(currentData.time)
time_vec = currentData.time[np.logical_and(currentData.time > sTime, currentData.time < eTime)]
time_vec = np.concatenate(([sTime],time_vec,[eTime]))
max_diff = np.max(np.diff(time_vec))
if max_diff > datetime.timedelta(minutes=max_off_time):
currentData.setMetadata(good_period=False)
else:
currentData.setMetadata(good_period=True)
return dataObj
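# Usage sketch (illustrative): flag periods with more than 10 minutes of radar
# downtime; assuming setMetadata() stores the keyword in the data set's
# metadata, the result can be read back as shown.
#
# checkDataQuality(dataObj, max_off_time=10)
# good = dataObj.active.metadata['good_period']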
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
"""Removes data outside of the rangeLimits and gateLimits boundaries.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the maximum and minimum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minimum gates to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Returns
-------
newData : musicDataObj
Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).
Written by Nathaniel A. Frissell, Fall 2013
"""
if (rangeLimits != None) or (gateLimits != None) or (timeLimits != None):
defineLimits(dataObj,dataSet=dataSet,rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)
currentData = getDataSet(dataObj,dataSet)
try:
#Make a copy of the current data set.
commentList = []
if (currentData.metadata.has_key('timeLimits') == False and
currentData.metadata.has_key('beamLimits') == False and
currentData.metadata.has_key('gateLimits') == False):
return currentData
newData = currentData.copy(newDataSetName,comment)
#Apply the gateLimits
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]
newData.data = newData.data[:,:,gateInx]
newData.fov.gates = newData.fov.gates[gateInx]
newData.fov.latCenter = newData.fov.latCenter[:,gateInx]
newData.fov.lonCenter = newData.fov.lonCenter[:,gateInx]
newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[:,gateInxFull]
newData.fov.lonFull = newData.fov.lonFull[:,gateInxFull]
newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]
commentList.append('gate: %i,%i' % tuple(limits))
rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
commentList.append('range [km]: %i,%i' % rangeLim)
#Remove limiting item from metadata.
newData.metadata.pop('gateLimits')
if newData.metadata.has_key('rangeLimits'): newData.metadata.pop('rangeLimits')
#Apply the beamLimits.
if currentData.metadata.has_key('beamLimits'):
limits = currentData.metadata['beamLimits']
beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]
newData.data = newData.data[:,beamInx,:]
newData.fov.beams = newData.fov.beams[beamInx]
newData.fov.latCenter = newData.fov.latCenter[beamInx,:]
newData.fov.lonCenter = newData.fov.lonCenter[beamInx,:]
newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[beamInxFull,:]
newData.fov.lonFull = newData.fov.lonFull[beamInxFull,:]
newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]
commentList.append('beam: %i,%i' % tuple(limits))
#Remove limiting item from metadata.
newData.metadata.pop('beamLimits')
#Apply the time limits.
if currentData.metadata.has_key('timeLimits'):
limits = currentData.metadata['timeLimits']
timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]
newData.data = newData.data[timeInx,:,:]
newData.time = newData.time[timeInx]
commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
#Remove limiting item from metadata.
newData.metadata.pop('timeLimits')
#Update the history with what limits were applied.
comment = 'Limits Applied'
commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
key = max(newData.history.keys())
newData.history[key] = commentStr
logging.debug(commentStr)
newData.setActive()
return newData
except:
if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
logging.warning('Limits not applied; returning the original data set.')
return currentData
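# Usage sketch (gate numbers are illustrative): define limits, then trim the
# data and FOV arrays to match. applyLimits() returns the trimmed musicDataObj,
# or the original one if no limits were set or an error occurred.
#
# defineLimits(dataObj, gateLimits=[10, 40])
# trimmed = applyLimits(dataObj, newDataSetName='limitsApplied')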
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
"""Finds the center cell of the field-of-view of a musicArray data object.
The range, azimuth, x-range, and y-range from the center to each cell in the FOV
is calculated and saved to the FOV object. The following objects are added to
dataObj.dataSet:
fov.relative_centerInx: [beam, gate] index of the center cell
fov.relative_azm: Azimuth relative to center cell [deg]
fov.relative_range: Range relative to center cell [km]
fov.relative_x: X-range relative to center cell [km]
fov.relative_y: Y-range relative to center cell [km]
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
altitude : Optional[float]
altitude added to Re = 6378.1 km [km]
Returns
-------
None
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
#Get the chosen dataset.
currentData = getDataSet(dataObj,dataSet)
#Determine center beam.
ctrBeamInx = len(currentData.fov.beams)/2
ctrGateInx = len(currentData.fov.gates)/2
currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]
#Set arrays of lat1/lon1 to the center cell value. Use this to calculate all other positions
#with numpy array math.
lat1 = np.zeros_like(currentData.fov.latCenter)
lon1 = np.zeros_like(currentData.fov.latCenter)
lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
#Make lat2/lon2 the center position array of the dataset.
lat2 = currentData.fov.latCenter
lon2 = currentData.fov.lonCenter
#Calculate the azimuth and distance from the centerpoint to the endpoint.
azm = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)
#Save calculated values to the current data object, as well as calculate the
#X and Y relative positions of each cell.
currentData.fov.relative_azm = azm
currentData.fov.relative_range = dist
currentData.fov.relative_x = dist * np.sin(np.radians(azm))
currentData.fov.relative_y = dist * np.cos(np.radians(azm))
return None
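# Usage sketch (altitude is illustrative): compute each cell's range, azimuth,
# and x/y offsets relative to the FOV center cell; results are attached to
# dataObj.active.fov (relative_azm, relative_range, relative_x, relative_y).
#
# determineRelativePosition(dataObj, altitude=250.)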
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
"""Interpolates the data in a musicArray object to a regular time grid.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
timeRes : Optional[float]
time resolution of new time vector [seconds]
newTimeVec : Optional[list of datetime.datetime]
Sequence of datetime.datetime objects that data will be interpolated to. This overrides timeRes.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
sTime = currentData.time[0]
sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
fTime = currentData.time[-1]
#Create new time vector.
if newTimeVec == None:
newTimeVec = [sTime]
while newTimeVec[-1] < fTime:
newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))
#Ensure that the new time vector is within the bounds of the actual data set.
newTimeVec = np.array(newTimeVec)
good = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
newTimeVec = newTimeVec[good]
newEpochVec = utils.datetimeToEpoch(newTimeVec)
#Initialize interpolated data.
nrTimes = len(newTimeVec)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for rg in range(nrGates):
for bb in range(nrBeams):
input_x = currentData.time[:]
input_y = currentData.data[:,bb,rg]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
input_x = utils.datetimeToEpoch(input_x)
intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
interpArr[:,bb,rg] = intFn(newEpochVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.time = newTimeVec
newDataSet.data = interpArr
newDataSet.setActive()
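# Usage sketch (time resolution is illustrative): resample the active data set
# onto a regular 10 s time grid before filtering or spectral analysis.
#
# timeInterpolation(dataObj, timeRes=10)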
def filterTimes(sTime,eTime,timeRes,numTaps):
"""The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
This function will calcuate the full time period of data that needs to be loaded in order to provide filtered data
for the event requested.
Parameters
----------
sTime : datetime.datetime
Start time of event.
eTime : datetime.datetime
End time of event.
timeRes : float
Time resolution in seconds of data to be sent to filter.
numTaps : int
Length of the filter
Returns
-------
newSTime, newETime : datetime.datetime, datetime.datetime
Start and end times of data that needs to be fed into the filter.
Written by Nathaniel A. Frissell, Fall 2013
"""
td = datetime.timedelta(seconds=(numTaps*timeRes/2.))
newSTime = sTime - td
newETime = eTime + td
return (newSTime, newETime)
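# Usage sketch (event times and filter length are assumptions for
# illustration): pad the requested event window so a 101-tap FIR filter on
# 10 s data is valid over the full event.
#
# import datetime
# sTime = datetime.datetime(2010, 11, 19, 14, 0)
# eTime = datetime.datetime(2010, 11, 19, 16, 0)
# loadSTime, loadETime = filterTimes(sTime, eTime, timeRes=10, numTaps=101)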
class filter(object):
"""Filter a VT sig/sigStruct object and define a FIR filter object.
If only cutoff_low is defined, this is a high pass filter.
If only cutoff_high is defined, this is a low pass filter.
If both cutoff_low and cutoff_high are defined, this is a band pass filter.
Uses scipy.signal.firwin()
High pass and band pass filters inspired by Matti Pastell's page:
http://mpastell.com/2010/01/18/fir-with-scipy/
Metadata keys:
'filter_cutoff_low' --> cutoff_low
'filter_cutoff_high' --> cutoff_high
'filter_numtaps' --> numtaps
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
numtaps : Optional[int]
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
the metadata value will be used.
cutoff_low : Optional[float, 1D array_like or None]
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`. If None, a high-pass filter will not
be applied.
If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
the metadata value will be used.
cutoff_high : Optional[float, 1D array_like, or None]
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
the metadata value will be used.
width : Optional[float]
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : Optional[string or tuple of string and parameter values]
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : Optional[bool]
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : Optional[bool]
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise.
Attributes
----------
comment : str
cutoff_low : float, 1D array_like or None
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges).
cutoff_high : float, 1D array_like, or None
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
nyq : float
the Nyquist rate
ir : 1D numpy.array
Impulse response (FIR coefficients) of the designed filter.
Methods
-------
plotTransferFunction
plotImpulseResponse
filter
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
import scipy as sp
sigObj = getattr(dataObj,dataSet)
nyq = sigObj.nyquistFrequency()
#Get metadata for cutoffs and numtaps.
md = sigObj.metadata
if cutoff_high == None:
if md.has_key('filter_cutoff_high'):
cutoff_high = md['filter_cutoff_high']
if cutoff_low == None:
if md.has_key('filter_cutoff_low'):
cutoff_low = md['filter_cutoff_low']
if numtaps == None:
if md.has_key('filter_numtaps'):
numtaps = md['filter_numtaps']
else:
logging.warning('You must provide numtaps.')
return
if cutoff_high != None: #Low pass
lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
d = lp
if cutoff_low != None: #High pass
hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
hp[numtaps/2] = hp[numtaps/2] + 1
d = hp
if cutoff_high != None and cutoff_low != None:
d = -(lp+hp)
d[numtaps/2] = d[numtaps/2] + 1
d = -1.*d #Needed to correct 180 deg phase shift.
if cutoff_high == None and cutoff_low == None:
logging.warning("You must define cutoff frequencies!")
return
self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cutoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
self.cutoff_low = cutoff_low
self.cutoff_high = cutoff_high
self.nyq = nyq
self.ir = d
self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)
def __str__(self):
return self.comment
def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_mag : Optional[float]
Minimum value for y-axis for the frequency response plot.
ymax_mag : Optional[float]
Maximum value for y-axis for the frequency response plot.
ymin_phase : Optional[float]
Minimum value for y-axis for the phase response plot.
ymax_phase : Optional[float]
Maximum value for y-axis for the phase response plot.
worN : Optional[int]
passed to scipy.signal.freqz()
If None, then compute at 512 frequencies around the unit circle.
If the len(filter) > 512, then compute at len(filter) frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
if worN == None:
if len(self.ir) > 512: worN = len(self.ir)
else: worN = None
else: pass
w,h = sp.signal.freqz(self.ir,1,worN=worN)
h_dB = 20 * np.log10(abs(h))
axis = fig.add_subplot(211)
#Compute frequency vector.
w = w/max(w) * self.nyq
axis.plot(w,h_dB,'.-')
#mp.axvline(x=self.fMax,color='r',ls='--',lw=2)
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)
axis.set_xlabel(r'Frequency (Hz)')
axis.set_ylabel('Magnitude (db)')
axis.set_title(r'Frequency response')
axis = fig.add_subplot(212)
h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
axis.plot(w,h_Phase,'.-')
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)
axis.set_ylabel('Phase (radians)')
axis.set_xlabel(r'Frequency (Hz)')
axis.set_title(r'Phase response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_imp : Optional[float]
Minimum value for y-axis for the impulse response plot.
ymax_imp : Optional[float]
Maximum value for y-axis for the impulse response plot.
ymin_step : Optional[float]
Minimum value for y-axis for the step response plot.
ymax_step : Optional[float]
Maximum value for y-axis for the step response plot.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
l = len(self.ir)
impulse = np.repeat(0.,l); impulse[0] =1.
x = np.arange(0,l)
response = sp.signal.lfilter(self.ir,1,impulse)
axis = fig.add_subplot(211)
axis.stem(x, response)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Impulse response')
axis = fig.add_subplot(212)
step = np.cumsum(response)
axis.stem(x, step)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Step response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
"""Apply the filter to a vtsig object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
sigobj = getattr(dataObj,dataSet)
vtsig = sigobj.parent
nrTimes,nrBeams,nrGates = np.shape(sigobj.data)
#Filter causes a delay in the signal and also doesn't get the tail end of the signal... Shift signal around, provide info about where the signal is valid.
shift = np.int32(-np.floor(len(self.ir)/2.))
start_line = np.zeros(nrTimes)
start_line[0] = 1
start_line = np.roll(start_line,shift)
tinx0 = abs(shift)
tinx1 = np.where(start_line == 1)[0][0]
val_tm0 = sigobj.time[tinx0]
val_tm1 = sigobj.time[tinx1]
filteredData = np.zeros_like(sigobj.data)
#Apply filter
for bm in range(nrBeams):
for rg in range(nrGates):
tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
tmp = np.roll(tmp,shift)
filteredData[:,bm,rg] = tmp[:]
#Create new signal object.
newsigobj = sigobj.copy(newDataSetName,self.comment)
#Put in the filtered data.
newsigobj.data = copy.copy(filteredData)
newsigobj.time = copy.copy(sigobj.time)
#Clear out ymin and ymax from metadata; make sure meta data block exists.
#If not, create it.
if hasattr(newsigobj,'metadata'):
delMeta = ['ymin','ymax','ylim']
for key in delMeta:
if newsigobj.metadata.has_key(key):
del newsigobj.metadata[key]
else:
newsigobj.metadata = {}
newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)
key = 'title'
if newsigobj.metadata.has_key(key):
newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
else:
newsigobj.metadata[key] = 'Filtered'
newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
newsigobj.setActive()
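# Usage sketch (cutoff frequencies are illustrative): design a band-pass FIR
# filter and apply it to the active data set; the result is stored as a new
# 'filtered' data set and made active.
#
# filt = filter(dataObj, numtaps=101, cutoff_low=0.0003, cutoff_high=0.0012)
# fig = filt.plotTransferFunction()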
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
"""Linearly detrend a data in a musicArray/musicDataObj object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
type : Optional[str]
The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
is subtracted from data. If type == 'constant', only the mean of data is subtracted.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
try:
newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
except:
newDataArr[:,bm,rg] = np.nan
if comment == None:
comment = type.capitalize() + ' detrend (scipy.signal.detrend)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
"""Convert all NANs and INFs to finite numbers using numpy.nan_to_num().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
if comment == None:
comment = 'numpy.nan_to_num'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = np.nan_to_num(currentData.data)
newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
"""Apply a window to a musicArray object. The window is calculated using scipy.signal.get_window().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
window : Optional[str]
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
win = sp.signal.get_window(window,nrTimes,fftbins=False)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = currentData.data[:,bm,rg] * win
if comment == None:
comment = window.capitalize() + ' window applied (scipy.signal.get_window)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def calculateFFT(dataObj,dataSet='active',comment=None):
"""Calculate the spectrum of an object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Determine frequency axis.
nyq = currentData.nyquistFrequency()
freq_ax = np.arange(nrTimes,dtype='f8')
freq_ax = (freq_ax / max(freq_ax)) - 0.5
freq_ax = freq_ax * 2. * nyq
#Use complex64, not complex128! If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])
currentData.freqVec = freq_ax
currentData.spectrum = newDataArr
# Calculate the dominant frequency #############################################
posFreqInx = np.where(currentData.freqVec >= 0)[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) #Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) #Use the magnitude of the positive frequency data.
#Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
currentData.appendHistory('Calculated FFT')
def calculateDlm(dataObj,dataSet='active',comment=None):
"""Calculate the cross-spectral matrix of a musicaArray object. FFT must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
nCells = nrBeams * nrGates
currentData.llLookupTable = np.zeros([5,nCells])
currentData.Dlm = np.zeros([nCells,nCells],dtype=np.complex128)
#Only use positive frequencies...
posInx = np.where(currentData.freqVec > 0)[0]
#Explicitly write out gate/range indices...
llList = []
for gg in xrange(nrGates):
for bb in xrange(nrBeams):
llList.append((bb,gg))
for ll in range(nCells):
llAI = llList[ll]
ew_dist = currentData.fov.relative_x[llAI]
ns_dist = currentData.fov.relative_y[llAI]
currentData.llLookupTable[:,ll] = [ll, currentData.fov.beams[llAI[0]], currentData.fov.gates[llAI[1]],ns_dist,ew_dist]
spectL = currentData.spectrum[posInx,llAI[0],llAI[1]]
for mm in range(nCells):
mmAI = llList[mm]
spectM = currentData.spectrum[posInx,mmAI[0],mmAI[1]]
currentData.Dlm[ll,mm] = np.sum(spectL * np.conj(spectM))
currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
"""Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Cross-spectrum array Dlm must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
kxMax : Optional[float]
Maximum kx (East-West) wavenumber to calculate [rad/km]
kyMax : Optional[float]
Maximum ky (North-South) wavenumber to calculate [rad/km]
dkx : Optional[float]
kx resolution [rad/km]
dky : Optional[float]
ky resolution [rad/km]
threshold : Optional[float]
threshold of signals to detect as a fraction of the maximum eigenvalue
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Calculate eigenvalues, eigenvectors
eVals,eVecs = np.linalg.eig(np.transpose(dataObj.active.Dlm))
nkx = np.ceil(2*kxMax/dkx)
if (nkx % 2) == 0: nkx = nkx+1
kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)
nky = np.ceil(2*kyMax/dky)
if (nky % 2) == 0: nky = nky+1
kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)
nkx = int(nkx)
nky = int(nky)
xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
ym = currentData.llLookupTable[3,:] #y is in the N-S direction.
maxEval = np.max(np.abs(eVals))
minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
cnt = np.size(minEvalsInx)
maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
nSigs = np.size(maxEvalsInx)
if cnt < 3:
logging.warning('Not enough small eigenvalues to form a stable noise subspace; kArr results may be unreliable.')
logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
logging.info('Kx Max: ' + str(kxMax))
logging.info('Kx Res: ' + str(dkx))
logging.info('Ky Max: ' + str(kyMax))
logging.info('Ky Res: ' + str(dky))
logging.info('')
logging.info('Signal Threshold: ' + str(threshold))
logging.info('Number of Det Signals: ' + str(nSigs))
logging.info('Number of Noise Evals: ' + str(cnt))
logging.info('Starting kArr Calculation...')
t0 = datetime.datetime.now()
def vCalc(um,v):
return np.dot( np.conj(um), v) * np.dot( np.conj(v), um)
vList = [eVecs[:,minEvalsInx[ee]] for ee in xrange(cnt)]
kArr = np.zeros((nkx,nky),dtype=np.complex64)
for kk_kx in xrange(nkx):
kx = kxVec[kk_kx]
for kk_ky in xrange(nky):
ky = kyVec[kk_ky]
um = np.exp(1j*(kx*xm + ky*ym))
kArr[kk_kx,kk_ky]= 1. / np.sum(map(lambda v: vCalc(um,v), vList))
t1 = datetime.datetime.now()
logging.info('Finished kArr Calculation. Total time: ' + str(t1-t0))
currentData.karr = kArr
currentData.kxVec = kxVec
currentData.kyVec = kyVec
currentData.appendHistory('Calculated kArr')
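# Typical processing-chain sketch (order inferred from the requirements stated
# in this module; parameter values are illustrative):
#
# windowData(dataObj)
# calculateFFT(dataObj)
# calculateDlm(dataObj)
# calculateKarr(dataObj, kxMax=0.05, kyMax=0.05, dkx=0.001, dky=0.001)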
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
"""Replace SuperDARN Data with simulated MSTID(s). This is useful for understanding how the signal processing
routines of this module affect ideal data.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
keepLocalRange : Optional[bool]
If true, the locations calculated for the actual radar field of view will be used. If false,
a linearly-spaced grid will replace the true grid.
sigs : Optional[list of tuples]
A list of tuples defining the characteristics of the simulated signal. Sample list is as follows.
If this keyword is None, the values in this sample list are used as the default values.::
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
Each signal is evaluated as a cosine and then summed together. The cosine evaluated is::
sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
noiseFactor : Optional[float]
Add white gaussian noise to the simulated signal. noiseFactor is a scalar such that:
noise = noiseFactor*np.random.standard_normal(nSteps)
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
#Typical TID Parameters:
# Frequency: 0.0003 Hz
# Period: 55.5 min
# H. Wavelength: 314 km
# k: 0.02 /km
if keepLocalRange == True:
nx, ny = np.shape(currentData.fov.relative_x)
xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)
xgrid = currentData.fov.relative_x
ygrid = currentData.fov.relative_y
else:
nx = 16
xRange = 800.
ny = 25
yRange = 600.
xvec = np.linspace(-xRange/2.,xRange/2.,nx)
yvec = np.linspace(-yRange/2.,yRange/2.,ny)
dx = np.diff(xvec)[0]
dy = np.diff(yvec)[0]
xaxis = np.append(xvec,xvec[-1]+dx)
yaxis = np.append(yvec,yvec[-1]+dy)
xgrid = np.zeros((nx,ny))
ygrid = np.zeros((nx,ny))
for kk in xrange(nx): ygrid[kk,:] = yvec[:]
for kk in xrange(ny): xgrid[:,kk] = xvec[:]
if sigs == None:
#Set some default signals.
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
secVec = np.array(utils.datetimeToEpoch(currentData.time))
secVec = secVec - secVec[0]
nSteps = len(secVec)
dt = currentData.samplePeriod()
dataArr = np.zeros((nSteps,nx,ny))
for step in xrange(nSteps):
t = secVec[step]
for kk in xrange(len(sigs)):
amp = sigs[kk][0]
kx = sigs[kk][1]
ky = sigs[kk][2]
f = sigs[kk][3]
phi = sigs[kk][4]
dc = sigs[kk][5]
if 1./dt <= 2.*f:
logging.warning('Nyquist Violation in f.')
logging.warning('Signal #: %i' % kk)
# if 1./dx <= 2.*kx/(2.*np.pi):
# print 'WARNING: Nyquist Violation in kx.'
# print 'Signal #: %i' % kk
#
# if 1./dy <= 2.*ky/(2.*np.pi):
# print 'WARNING: Nyquist Violation in ky.'
# print 'Signal #: %i' % kk
temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
dataArr[step,:,:] = dataArr[step,:,:] + temp
#Signal RMS
sig_rms = np.zeros((nx,ny))
for xx in xrange(nx):
for yy in xrange(ny):
sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))
noise_rms = np.zeros((nx,ny))
if noiseFactor > 0:
nf = noiseFactor
#Temporal White Noise
for xx in xrange(nx):
for yy in xrange(ny):
noise = nf*np.random.standard_normal(nSteps)
noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise
xx = np.arange(ny)
mu = (ny-1.)/2.
sigma2 = 10.0
sigma = np.sqrt(sigma2)
rgDist = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
rgDist = rgDist / np.max(rgDist)
mask = np.zeros((nx,ny))
for nn in xrange(nx): mask[nn,:] = rgDist[:]
mask3d = np.zeros((nSteps,nx,ny))
for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]
#Apply Range Gate Dependence
dataArr = dataArr * mask3d
snr = (sig_rms/noise_rms)**2
snr_db = 10.*np.log10(snr)
if comment == None:
comment = 'Simulated data injected.'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = dataArr
newDataSet.setActive()
#OPENW,unit,'simstats.txt',/GET_LUN,WIDTH=300
#stats$ = ' Mean: ' + NUMSTR(MEAN(sig_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(sig_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(sig_rms)^2,3)
#PRINTF,unit,'SIG_RMS'
#PRINTF,unit,stats$
#PRINTF,unit,sig_rms
#
#PRINTF,unit,''
#PRINTF,unit,'NOISE_RMS'
#stats$ = ' Mean: ' + NUMSTR(MEAN(noise_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(noise_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(noise_rms)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,noise_rms
#
#PRINTF,unit,''
#PRINTF,unit,'SNR_DB'
#stats$ = ' Mean: ' + NUMSTR(MEAN(snr_db),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(snr_db),3) $
# + ' Var: ' + NUMSTR(STDDEV(snr_db)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,snr_db
#CLOSE,unit
def scale_karr(kArr):
"""Scale/normalize kArr for plotting and signal detection.
Parameters
----------
kArr : 2D numpy.array
Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Returns
-------
data : 2D numpy.array
Scaled and normalized version of kArr.
Written by Nathaniel A. Frissell, Fall 2013
"""
data = np.abs(kArr) - np.min(np.abs(kArr))
#Determine scale for colorbar.
scale = [0.,1.]
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
return data
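# Usage sketch: normalize the horizontal wavenumber array before plotting or
# signal detection.
#
# scaled = scale_karr(dataObj.active.karr)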
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
"""Automatically detects local maxima/signals in a calculated kArr. This routine uses the watershed
algorithm from the skimage image processing library. Results are automatically stored in
dataObj.dataSet.sigDetect.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
threshold : Optional[float]
Scaled input data must be above this value to be detected. A higher number
will reduce the number of signals detected.
neighborhood : Optional[tuple]
Local region in which to search for peaks at every point in the image/array.
(10,10) will search a 10x10 pixel area.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
################################################################################
#Feature detection...
#Now lets do a little image processing...
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
#sudo pip install cython
#sudo pip install scikit-image
data = scale_karr(currentData.karr)
mask = data > threshold
labels, nb = ndimage.label(mask)
distance = ndimage.distance_transform_edt(mask)
local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
markers,nb = ndimage.label(local_maxi)
labels = watershed(-distance,markers,mask=mask)
areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
order = np.argsort(maxima)[::-1] + 1
maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))
sigDetect = SigDetect()
sigDetect.mask = mask
sigDetect.labels = labels
sigDetect.nrSigs = nb
sigDetect.info = []
for x in xrange(labels.max()):
info = {}
info['labelInx'] = x+1
info['order'] = order[x]
info['area'] = areas[x]
info['max'] = maxima[x]
info['maxpos'] = maxpos[x]
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = currentData.dominantFreq
info['period'] = 1./currentData.dominantFreq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
sigDetect.info.append(info)
currentData.appendHistory('Detected KArr Signals')
currentData.sigDetect = sigDetect
return currentData
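# Usage sketch (threshold is illustrative): detect signals in the scaled kArr
# and print the parameters of each detection.
#
# detectSignals(dataObj, threshold=0.35)
# for sig in dataObj.active.sigDetect.info:
#     print sig['order'], sig['kx'], sig['ky'], sig['vel']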
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
"""Manually add a signal to the detected signal list. All signals will be re-ordered according to value in the
scaled kArr. Added signals can be distinguished from autodetected signals because
'labelInx' and 'area' will both be set to -1.
Parameters
----------
kx : float
Value of kx of new signal.
ky : float
Value of ky of new signal.
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
frequency : Optional[float]
Frequency to use to calculate period, phase velocity, etc. If None,
the calculated dominant frequency will be used.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
def find_nearest_inx(array,value):
return (np.abs(array-value)).argmin()
kx_inx = find_nearest_inx(currentData.kxVec,kx)
ky_inx = find_nearest_inx(currentData.kyVec,ky)
maxpos = (kx_inx,ky_inx)
value = data[kx_inx,ky_inx]
true_value = currentData.karr[kx_inx,ky_inx] #Get the unscaled kArr value.
if frequency == None:
freq = currentData.dominantFreq
else:
freq = frequency
info = {}
info['labelInx'] = -1
info['area'] = -1
info['order'] = -1
info['max'] = value
info['true_max'] = true_value #Unscaled kArr value
info['maxpos'] = maxpos
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = freq
info['period'] = 1./freq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
currentData.sigDetect.info.append(info)
currentData.sigDetect.reorder()
currentData.appendHistory('Appended Signal to sigDetect List')
return currentData
def del_signal(order,dataObj,dataSet='active'):
"""Remove a signal to the detected signal list.
Parameters
----------
order :
Single value or list of signal orders (IDs) to be removed from the list.
dataObj : musicArray
object
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
orderArr = np.array(order)
for item in list(currentData.sigDetect.info):
if item['order'] in orderArr:
currentData.sigDetect.info.remove(item)
currentData.sigDetect.reorder()
currentData.appendHistory('Deleted Signals from sigDetect List')
return currentData
| gpl-3.0 |
cbertinato/pandas | pandas/tests/test_downstream.py | 1 | 4179 | """
Testing that we work in the downstream packages
"""
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="pandas-wheels-50", strict=False)
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
# -S : disable site-packages
# -s : disable user site-packages
# -E : disable PYTHON* env vars, especially PYTHONPATH
# And, that's apparently not enough, so we give up.
# https://github.com/MacPython/pandas-wheels/pull/50
call = ['python', '-sSE', '-c', 'import pandas']
with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
for name in ['numpy', 'pytz', 'dateutil']:
assert name in output
| bsd-3-clause |
abhijeet-talaulikar/Automatic-Helmet-Detection | K-Fold/Logistic_Regression.py | 1 | 2663 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import *
from timeit import default_timer as timer
from random import randint
from sklearn.feature_selection import *
from sklearn.decomposition import PCA
helmet_data = np.genfromtxt ('helmet.csv', delimiter=",")
face_data = np.genfromtxt ('face.csv', delimiter=",")
data_full = np.concatenate((helmet_data, face_data), 0)
np.random.shuffle(data_full) #shuffle the tuples
#feature reduction (on HOG part)
#gain, j = mutual_info_classif(data_full[:, 8:-1], data_full[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0
#for i in np.arange(len(gain)):
# if gain[i] <= 0.001:
# data_full = np.delete(data_full, 8+i-j, 1)
# j += 1
#data = np.copy(data_full)
#principal component analysis
pca = PCA(n_components=150)
data = pca.fit_transform(data_full[:, 8:-1])
data = np.concatenate((data_full[:, 0:8], data, np.array([data_full[:, -1]]).T), axis=1)
precision, recall, f1, accuracy, support, fn, roc_auc = 0, 0, 0, 0, 0, 0, 0
colors = ['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange']
k = 10
kf = KFold(n_splits = k)
start = timer()
for train, test in kf.split(data):
X_train, X_test = data[train, 0:-1], data[test, 0:-1]
y_train, y_test = data[train, -1], data[test, -1]
clf = LogisticRegression().fit(X_train, y_train)
y_pred = clf.predict(X_test)
#ROC curve
y_prob = clf.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)
roc_auc += auc(fpr, tpr)
plt.plot(fpr, tpr, color=colors[randint(0, len(colors)-1)])
precision += precision_score(y_test, y_pred, average = 'macro')
recall += recall_score(y_test, y_pred, average = 'macro')
f1 += f1_score(y_test, y_pred, average = 'macro')
accuracy += accuracy_score(y_test, y_pred)
y = y_test - y_pred
fn += sum(y[y > 0]) / len(y_test)
end = timer()
precision /= k
recall /= k
f1 /= k
accuracy /= k
fn /= k
print("Precision \t: %s" % round(precision, 4))
print("Recall \t\t: %s" % round(recall, 4))
print("F1 \t\t: %s" % round(f1, 4))
print("Accuracy \t: %s" % round(accuracy, 4))
print("False Neg \t: %s%%" % round(fn * 100, 4))
print("Mean AUC \t: %s" % round(roc_auc / k, 4))
print("\nExecution time: %s ms" % round((end - start) * 1000, 4))
#ROC curve
plt.title('Logistic Regression (Mean AUC = %s)' % round(roc_auc / k, 4))
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.05,1.0])
plt.ylim([0.0,1.05])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| gpl-3.0 |
looooo/paraBEM | examples/plots/lifting_line.py | 1 | 1404 | from __future__ import division
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import paraBEM
from paraBEM.liftingline import LiftingLine
from paraBEM.utils import check_path
# WingGeometry
spw = 2
numpos = 50
z_fac_1 = -0.3
z_fac_2 = -0.7
y = np.sin(np.linspace(0, np.pi/2, numpos)) * spw/2
x = [0. for _ in y]
z = [i**2 * z_fac_1 + i**6 * z_fac_2 for i in y]
mirror = lambda xyz: [xyz[0], -xyz[1], xyz[2]]
wing = list(zip(x, y, z))
wing = list(map(mirror, wing))[::-1] + list(wing)[1:]
wing = [paraBEM.Vector3(*i) for i in wing]
# LiftingLine
lifting_line = LiftingLine(wing)
lifting_line.v_inf = paraBEM.Vector3(1, 0, 0)
lifting_line.solve_for_best_gamma(1)
gamma = [i.best_gamma for i in lifting_line.segments]
gamma_max = max(gamma)
# Plot
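# gamma_el below is the reference elliptic circulation distribution, scaled to the
# same maximum as the optimised result, for comparison in the bottom subplot.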
gamma_el = lambda y: gamma_max * (1 - (y / spw * 2)**2)**(1 / 2)
mids = [[i.mids.x, i.mids.y, i.mids.z] for i in lifting_line.segments]
x, y, z = zip(*mids)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(y, z)
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(y, x, marker="x")
ax3 = fig.add_subplot(3, 1, 3)
y_el = np.linspace(-1, 1, 400)
ax3.plot([-spw/2] + list(y) + [spw/2], [0] + gamma + [0], marker="x")
ax3.plot(y_el, list(map(gamma_el, y_el)))
plt.savefig(check_path("results/2d/liftingline.png"))
total = 0
for i in lifting_line.segments:
total += i.lift_factor * i.best_gamma
print(total)
| gpl-3.0 |
tashaxe/Red-DiscordBot | lib/youtube_dl/extractor/wsj.py | 7 | 4311 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
https?://(?:www\.)?wsj\.com/video/[^/]+/|
wsj:
)
(?P<id>[a-fA-F0-9-]{36})
'''
IE_DESC = 'Wall Street Journal'
_TESTS = [{
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': 'e230a5bb249075e40793b655a54a02e4',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}, {
'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
query={
'type': 'guid',
'count': 1,
'query': video_id,
'fields': ','.join((
'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
'description', 'name', 'duration', 'videoURL', 'titletag',
'formattedCreationDate', 'keywords', 'editor')),
})['items'][0]
title = info.get('name', info.get('titletag'))
formats = []
f4m_url = info.get('videoURL')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
m3u8_url = info.get('hls')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
for v in info.get('videoMP4List', []):
mp4_url = v.get('url')
if not mp4_url:
continue
tbr = int_or_none(v.get('bitrate'))
formats.append({
'url': mp4_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
'width': int_or_none(v.get('width')),
'height': int_or_none(v.get('height')),
'fps': float_or_none(v.get('fps')),
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
# Thumbnails are conveniently in the correct format already
'thumbnails': info.get('thumbnailList'),
'creator': info.get('author'),
'uploader_id': info.get('editor'),
'duration': int_or_none(info.get('duration')),
'upload_date': unified_strdate(info.get(
'formattedCreationDate'), day_first=False),
'title': title,
'categories': info.get('keywords'),
}
class WSJArticleIE(InfoExtractor):
_VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
'info_dict': {
'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
'ext': 'mp4',
'upload_date': '20170221',
'uploader_id': 'ralcaraz',
'title': 'Bao Bao the Panda Leaves for China',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'data-src=["\']([a-fA-F0-9-]{36})', webpage, 'video id')
return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
| gpl-3.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/wide_n_deep_tutorial.py | 18 | 8111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
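# Usage sketch (not part of the original tutorial): a hypothetical call such as
#   m = build_estimator("/tmp/census_model", "wide_n_deep")
# returns the combined DNNLinearCombinedClassifier built from the crossed wide
# columns and the deep embedding columns defined above.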
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
  # Specify the file path below if you want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
idlead/scikit-learn | sklearn/externals/joblib/__init__.py | 23 | 4764 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
kikocorreoso/mplutils | mplutils/axes.py | 1 | 8516 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 23:43:37 2016
@author: kiko
"""
from __future__ import division, absolute_import
from .settings import RICH_DISPLAY
import numpy as np
if RICH_DISPLAY:
from IPython.display import display
def axes_set_better_defaults(ax,
axes_color = '#777777',
grid = False,
show = False):
"""
Enter an Axes instance and it will change the defaults to an opinionated
version of how a simple plot should be.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
axes_color : str
A string indicating a valid matplotlib color.
grid : bool
If `True` the grid of the axes will be shown, if `False` (default)
        the grid, if active, will be suppressed.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_axis_bgcolor((1, 1, 1))
ax.grid(grid)
for key in ax.spines.keys():
if ax.spines[key].get_visible():
ax.spines[key].set_color(axes_color)
ax.tick_params(axis = 'x', colors = axes_color)
ax.tick_params(axis = 'y', colors = axes_color)
ax.figure.set_facecolor('white')
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
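# Usage sketch (not part of the original module), assuming a standard pyplot figure:
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [0, 1, 4])
#   axes_set_better_defaults(ax, axes_color='#444444', grid=True, show=False)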
# http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html
def axes_set_axis_position(ax,
spines = ['bottom', 'left'],
pan = 0,
show = False):
"""
    Enter an Axes instance and, depending on the options, it will display only
    the axis spines you selected.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
spines : str or iterable
A string or an iterable of strings with the following valid options:
'bottom' : To active the bottom x-axis.
'top' : To active the top x-axis.
'left' : To active the left y-axis.
'right' : To active the right y-axis.
pan : int or iterable
        An integer value or an iterable of integer values indicating the amount
        to pan each axis. It must have the same length and the same order
        as the `spines` input.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
if np.isscalar(spines):
spines = (spines,)
len_spines = 1
else:
len_spines = len(spines)
if np.isscalar(pan):
pan = np.repeat(pan, len_spines)
len_pan = 1
else:
len_pan = len(pan)
if len_pan > 1 and len_pan != len_spines:
        raise ValueError('Length of `spines` and `pan` mismatch. `pan` '
                         'should be a scalar or should have the same length as `spines`.')
i = 0
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', pan[i])) # outward by `pan` points
spine.set_smart_bounds(True)
i += 1
else:
#spine.set_color('none') # don't draw spine
spine.set_visible(False)
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
ax.tick_params(labelleft = True)
if 'right' in spines:
ax.yaxis.set_ticks_position('right')
ax.tick_params(labelright = True)
if 'left' in spines and 'right' in spines:
ax.yaxis.set_ticks_position('both')
ax.tick_params(labelleft = True, labelright = True)
if 'left' not in spines and 'right' not in spines:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelbottom = True)
if 'top' in spines:
ax.xaxis.set_ticks_position('top')
ax.tick_params(labeltop = True)
if 'bottom' in spines and 'top' in spines:
ax.xaxis.set_ticks_position('both')
ax.tick_params(labelbottom = True, labeltop = True)
if 'bottom' not in spines and 'top' not in spines:
ax.xaxis.set_ticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
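# Usage sketch (not part of the original module): keep only the bottom and left
# spines, panned 10 points outward:
#   axes_set_axis_position(ax, spines=['bottom', 'left'], pan=10, show=False)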
def axes_set_origin(ax,
x = 0,
y = 0,
xticks_position = 'bottom',
yticks_position = 'left',
xticks_visible = True,
yticks_visible = True,
show = False):
"""
    Function to locate the x-axis and the y-axis at the positions you want.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
x : int or float
Value indicating the position on the y-axis where you want the x-axis
to be located.
y : int or float
Value indicating the position on the x-axis where you want the y-axis
to be located.
xticks_position : str
Default value is 'bottom' if you want the ticks to be located below
the x-axis. 'top' if you want the ticks to be located above the x-axis.
yticks_position : str
Default value is 'left' if you want the ticks to be located on the left
side of the y-axis. 'right' if you want the ticks to be located on the
right side of the y-axis.
xticks_visible : bool
Default value is True if you want ticks visible on the x-axis. False
if you don't want to see the ticks on the x-axis.
yticks_visible : bool
Default value is True if you want ticks visible on the y-axis. False
if you don't want to see the ticks on the y-axis.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position(xticks_position)
ax.spines['bottom'].set_position(('data', x))
ax.yaxis.set_ticks_position(yticks_position)
ax.spines['left'].set_position(('data', y))
if not xticks_visible:
ax.set_xticks([])
if not yticks_visible:
ax.set_yticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
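# Usage sketch (not part of the original module): draw both spines through the
# data origin with ticks below and to the left:
#   axes_set_origin(ax, x=0, y=0, xticks_position='bottom', yticks_position='left')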
def axes_set_aspect_ratio(ax, ratio = 'equal', show = True):
"""
    Function that accepts an Axes instance and updates it, setting the
    aspect ratio of the axes to the defined quantity.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
ratio : str or int/float
The value can be a string with the following values:
'equal' : (default) same scaling from data to plot units for x and y
'auto' : automatic; fill position rectangle with data
Or a:
number (int or float) : a circle will be stretched such that the
            height is num times the width. aspect=1 is the same as
aspect='equal'.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_aspect(ratio, adjustable = None)
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
            ax.figure.show()
| mit |
jmschrei/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
        self.clf = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
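# Illustrative sketch (not part of the original GUI): any object exposing an
# ``update(event, model)`` method can be registered as an observer, e.g.
#
#   class PrintingObserver(object):
#       def update(self, event, model):
#           print("model changed:", event)
#
#   model = Model()
#   model.add_observer(PrintingObserver())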
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
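        # Evaluate the classifier's decision function on a regular grid spanning
        # the plotting area; the View later contours the reshaped values.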
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
phev8/dataset_tools | experiment_handler/time_synchronisation.py | 1 | 1444 | import os
import pandas as pd
def read_synchronisation_file(experiment_root):
filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
return pd.read_csv(filepath)
def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
"""
    Convert numeric timestamps (seconds from the start of the video or posix timestamps) from one reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time).
Parameters
----------
experiment_root: str
Root of the current experiment (to find the right synchronisation matrix)
timestamps: float or array like
timestamps to be converted
from_reference: str
name of the reference of the original timestamps
to_reference: str
name of the reference time the timestamp has to be converted to
Returns
-------
converted_timestamps: float or array like
Timestamps given in to_reference time values
"""
synchronisation_file = read_synchronisation_file(experiment_root)
offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
converted_timestamps = timestamps + offset
return converted_timestamps
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
    print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video"))
| mit |
JanNash/sms-tools | lectures/06-Harmonic-model/plots-code/spectral-peaks.py | 22 | 1161 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = .8*fs
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis, pX,'c', lw=1.5)
plt.axis([0,7000, min(pX),10])
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
comprna/SUPPA | scripts/generate_boxplot_event.py | 1 | 5584 | # The next script will format a phenotype table (junctions, events, transcripts...)
# for running FastQTL analysis
# This version is for formatting the SCLC phenotype
"""
@authors: Juan L. Trincado
@email: juanluis.trincado@upf.edu
generate_boxplot_event.py: Generates a boxplot with the PSI values, given which samples are in which conditions
"""
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import re
from argparse import ArgumentParser, RawTextHelpFormatter
description = \
"Description:\n\n" + \
"This script accept a phenotype table (junctions, events, transcripts...)\n" + \
"and a genotype table (mutations associated to K-mers or SMRs) and returns a formatted table\n" + \
"for using with FastQTL"
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter,
add_help=True)
parser.add_argument("-i", "--input", required=True,
help="Input file")
parser.add_argument("-e", "--event", required=True, type=str,
help="Event to plot")
parser.add_argument('-g', '--groups',
action="store",
required=True,
type=str,
nargs="*",
help="Ranges of column numbers specifying the replicates per condition. "
"Column numbers have to be continuous, with no overlapping or missing columns between them. "
"Ex: 1-3,4-6")
parser.add_argument('-c', '--conds',
action="store",
required=False,
default="0",
type=str,
nargs="*",
help="Name of each one of the conditions. Ex: Mutated,Non_mutated")
parser.add_argument("-o", "--output", required=True,
help="Output path")
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def main():
args = parser.parse_args()
input_file = args.input
event = args.event
groups = re.findall(r"[\w]+", args.groups[0])
output_path = args.output
# input_file = "/home/juanluis/Desktop/Work/Master_class/events.psi"
# event = "ENSG00000149554;SE:chr11:125496728-125497502:125497725-125499127:+"
# groups = ['1','3','4','6']
# output_path = "/home/juanluis/Desktop/Work/Master_class/"
try:
logger.info("Reading input file...")
dict_PSI = {}
cond = 1
success = False
file = open(input_file)
for line in file:
tokens = line.rstrip().split("\t")
if (tokens[0]==event):
success = True
for i,x in enumerate(groups):
if(i%2==1):
continue
PSI = []
samples = range(int(groups[i]),int(groups[i+1])+1)
#Get the PSI of this group of samples
for j in samples:
PSI.append(tokens[j])
dict_PSI[cond] = PSI
cond = cond + 1
break
if(success):
#Create the boxplot
data_to_plot = []
for key in dict_PSI.keys():
data_to_plot.append(list(map(float,dict_PSI[key])))
# Create a figure instance
fig = plt.figure(figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot, patch_artist=True, sym='')
# change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='.', color='#000000', alpha=0.7)
# Assign different colors
colors = ['lightblue', 'pink']
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
for j in range(len(data_to_plot)):
y = data_to_plot[j]
x = np.random.normal(1 + j, 0.02, size=len(y))
plt.plot(x, y, 'ko', alpha=0.5)
# Custom x-axis labels if the user has input conditions
if (args.conds != "0"):
conditions = re.findall(r"[\w]+", args.conds[0])
ax.set_xticklabels(conditions)
# Leave just ticks in the bottom
ax.get_xaxis().tick_bottom()
ax.set_ylabel('PSI')
# Set the title
title = "Event: " + event
ax.set_title(title, fontsize=10)
# Add a horizontal grid to the plot,
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Set the limits for the y axes
ax.set_ylim([-0.05, 1.05])
# Save the figure
output_path = output_path + "/" + event + ".png"
logger.info("Created " + output_path)
fig.savefig(output_path, bbox_inches='tight')
else:
logger.info("Event not found.")
logger.info("Done.")
exit(0)
except Exception as error:
logger.error(repr(error))
logger.error("Aborting execution")
sys.exit(1)
if __name__ == '__main__':
    main()
| mit |
JFriel/honours_project | networkx/build/lib/networkx/convert_matrix.py | 10 | 33329 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
        A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
        A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
        except KeyError:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
    kind_to_python_type = {'f': float,
                           'i': int,
                           'u': int,
                           'b': bool,
                           'c': complex,
                           'S': str,
                           'V': 'void'}
    try:  # Python 3.x: chr() accepts code points above 255
        blurb = chr(1245)  # just to trigger the exception on Python 2
        kind_to_python_type['U'] = str
    except ValueError:  # Python 2.x: chr() is limited to 0-255, so use unicode
        kind_to_python_type['U'] = unicode
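    # For reference (values taken from NumPy's dtype kind codes): for example,
    # np.dtype(float).kind == 'f' and np.dtype(int).kind == 'i', so a float
    # matrix maps its entries through ``float`` and an integer matrix through
    # ``int``.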
G=_prep_create_using(create_using)
    n, m = A.shape
    if n != m:
        raise nx.NetworkXError("Adjacency matrix is not square. nx,ny=%s"
                               % (A.shape,))
    dt = A.dtype
    try:
        python_type = kind_to_python_type[dt.kind]
    except KeyError:
        raise TypeError("Unknown numpy data type: %s" % dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
    # Get the coordinates of all nonzero entries in the matrix. These
    # coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
    # on the fact that the vertices produced by ``np.asarray(A).nonzero()``
    # above are exactly the row/column indices of the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
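    # For example (assumed values): with A = [[0, 1], [1, 0]], ``edges`` contains
    # both (0, 1) and (1, 0); keeping only pairs with u <= v adds that edge once.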
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
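        # For example (assumed weight): a self-loop of weight 3 at node u shows up
        # once in ``data`` but twice in ``d = data + data``; appending -3 at (u, u)
        # restores the intended single contribution on the diagonal.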
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
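# Illustrative sketch (assumed values): for a 2 x 2 CSR matrix with
# data = [5, 7], indices = [1, 0], indptr = [0, 1, 2], the generator above
# yields (0, 1, 5) and then (1, 0, 7).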
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
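# A minimal usage sketch (hedged; assumes SciPy is available):
#
#     >>> from scipy import sparse
#     >>> A = sparse.csr_matrix([[0, 2], [0, 0]])
#     >>> list(_generate_weighted_edges(A))
#     [(0, 1, 2)]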
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
    # ``_generate_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
    try:
        import pandas
    except ImportError:
        raise SkipTest("Pandas not available")
| gpl-3.0 |
ralbayaty/KaggleRetina | testing/censureHistCalc.py | 1 | 4517 | from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
if __name__ == '__main__':
    try:
        file_name = sys.argv[1]
    except IndexError:
        print("Didn't give me a file...")
        file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
    # Read the image from file, then inspect its dimensions
    img = cv2.imread(file_name, 1)
    if img is None:
        sys.exit("Could not read image file: %s" % file_name)
    height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a PIL image from each channel so we can use PIL.Image.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
    # Check if the dimensions exceed the desired size; if so, resize keeping the aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
    # ``img1`` is referenced inside the loop but was never defined in the original
    # script; assume the intent was a color copy of the (possibly resized) image
    # that keypoints can be drawn onto each iteration.
    img1 = Image.merge("RGB", (red1, green1, blue1))
    while True:
        vis = gray.copy()
        img = img1.copy()  # fresh copy so drawings do not accumulate across iterations
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
        if min_scale == 0:
            min_scale = 1
        # Keep the scale range usable; CENSURE needs max_scale well above min_scale
        if max_scale - min_scale < 2:
            max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
        # Obtain the CENSURE features; detect() expects 2-D arrays, so convert
        # the PIL images back to ndarrays first
        censure.detect(np.asarray(blue1))
        kp_blue, scale_blue = censure.keypoints, censure.scales
        censure.detect(np.asarray(green1))
        kp_green, scale_green = censure.keypoints, censure.scales
        censure.detect(np.asarray(red1))
        kp_red, scale_red = censure.keypoints, censure.scales
        censure.detect(np.asarray(gray1))
        kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
draw_keypoints(gray1, kp_gray, scale_gray)
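        # Also draw the grayscale keypoints on the color copy shown below
        # (assumed intent; the original script never drew on ``img`` itself)
        draw_keypoints(img, kp_gray, scale_gray)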
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
image = cv2.cvtColor(np.asarray(img),cv2.COLOR_BGR2RGB)
cv2.imshow('censure', image)
if 0xFF & cv2.waitKey(500) == 27:
break
cv2.destroyAllWindows() | gpl-2.0 |