repo_name | path | copies | size | content | license
---|---|---|---|---|---
samuel1208/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
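# Editor's note (illustrative, not part of the original file): because of
# the sign indeterminacy described in the class docstring, fit once and
# reuse the fitted instance so that train and test data share a single
# projection:
#
#     from sklearn.decomposition import TruncatedSVD
#     svd = TruncatedSVD(n_components=5, random_state=42).fit(X_train)
#     X_train_red = svd.transform(X_train)
#     X_test_red = svd.transform(X_test)  # same components_, same signs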
| bsd-3-clause |
barbagroup/PetIBM | examples/ibpm/cylinder2dRe40/scripts/plotVorticity.py | 4 | 1401 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 2000 time steps (20 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
with h5py.File(filepath, 'r') as f:
    x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 2000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
with h5py.File(filepath, 'r') as f:
    wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contour lines of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
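# 16 evenly spaced contour levels spanning vorticity values from -3 to 3.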
levels = numpy.linspace(-3.0, 3.0, 16)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-1.0, 4.0)
ax.set_ylim(-2.0, 2.0)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
mrcslws/htmresearch | projects/thing_classification/thing_convergence.py | 3 | 13625 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file is used to run Thing experiments using simulated sensations.
"""
import random
import os
from math import ceil
import numpy as np
import pprint
import matplotlib.pyplot as plt
from sklearn import manifold, random_projection
from htmresearch.frameworks.layers.l2_l4_inference import (
L4L2Experiment, rerunExperimentFromLogfile)
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 256,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.01,
"minThreshold": 19,
"predictedSegmentDecrement": 0.0,
"activationThreshold": 19,
"sampleSize": 20,
"implementation": "etm",
}
def getL2Params():
"""
  Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 256 * 16,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.5,
"synPermProximalDec": 0.0,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 9,
"sampleSizeProximal": 10,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 30,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 1.001,
"learningMode": True,
}
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
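  Example (illustrative):
  >>> locateConvergencePoint([0, 10, 38, 39, 40], 35, 40)
  3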
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
  # Never diverges - converged in one iteration
return 1
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
Given inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration across all runs.
"""
convergenceSum = 0.0
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
return convergenceSum/len(inferenceStats)
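# Illustrative (editor's note): with settlingTime=3, a column whose L2
# representation first converges at iteration 7 contributes
# ceil(7/3.) = 3 settled sensations to the average.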
def loadThingObjects(numCorticalColumns=1, objDataPath='./data/'):
"""
Load simulated sensation data on a number of different objects
There is one file per object, each row contains one feature, location pairs
The format is as follows
[(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
[list of active bits of feature]]
The content before "=>" is the true 3D location / sensation
  The number of active bits is given after the "/" (before the "=>").
@return A simple object machine
"""
# create empty simple object machine
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=numCorticalColumns,
numFeatures=0,
numLocations=0,
)
for _ in range(numCorticalColumns):
objects.locations.append([])
objects.features.append([])
objFiles = []
for f in os.listdir(objDataPath):
if os.path.isfile(os.path.join(objDataPath, f)):
if '.log' in f:
objFiles.append(f)
idx = 0
OnBitsList = []
for f in objFiles:
objName = f.split('.')[0]
objName = objName[4:]
objFile = open('{}/{}'.format(objDataPath, f))
sensationList = []
for line in objFile.readlines():
# parse thing data file and extract feature/location vectors
sense = line.split('=>')[1].strip(' ').strip('\n')
OnBitsList.append(float(line.split('] =>')[0].split('/')[1]))
location = sense.split('],[')[0].strip('[')
feature = sense.split('],[')[1].strip(']')
location = np.fromstring(location, sep=',', dtype=np.uint8)
feature = np.fromstring(feature, sep=',', dtype=np.uint8)
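      # e.g. (illustrative values) the line
      #   "[(-33.6705, 75.5003, 2.4207)/10] => [[3,17,52],[6,9,41]]"
      # yields an OnBitsList entry of 10.0, location array([ 3, 17, 52])
      # and feature array([ 6,  9, 41]) as uint8 bit indices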
# add the current sensation to object Machine
sensationList.append((idx, idx))
for c in range(numCorticalColumns):
objects.locations[c].append(set(location.tolist()))
objects.features[c].append(set(feature.tolist()))
idx += 1
objects.addObject(sensationList, objName)
print "load object file: {} object name: {} sensation # {}".format(
f, objName, len(sensationList))
OnBitsList = np.array(OnBitsList)
plt.figure()
plt.hist(OnBitsList)
return objects, OnBitsList
def trainNetwork(objects, numColumns, l4Params, l2Params, verbose=False):
print " Training sensorimotor network ..."
objectNames = objects.objects.keys()
numObjects = len(objectNames)
exp = L4L2Experiment("shared_features",
L2Overrides=l2Params,
L4Overrides=l4Params,
numCorticalColumns=numColumns)
exp.learnObjects(objects.provideObjectsToLearn())
settlingTime = 1
L2Representations = exp.objectL2Representations
# if verbose:
# print "Learned object representations:"
# pprint.pprint(L2Representations, width=400)
# print "=========================="
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
maxSensationNumber = 30
overlapMat = np.zeros((numObjects, numObjects, maxSensationNumber))
numL2ActiveCells = np.zeros((numObjects, maxSensationNumber))
for objectIdx in range(numObjects):
objectId = objectNames[objectIdx]
obj = objects[objectId]
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
exp.sendReset()
for sensationNumber in range(maxSensationNumber):
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if sensationNumber >= len(objectCopy):
pair = objectCopy[-1]
else:
pair = objectCopy[sensationNumber]
if numColumns > 1:
raise NotImplementedError
else:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": False,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
for i in range(numObjects):
overlapMat[objectIdx, i, sensationNumber] = len(
exp.getL2Representations()[0] &
L2Representations[objects.objects.keys()[i]][0])
# if verbose:
# print "Intersection with {}:{}".format(
# objectNames[i], overlapMat[objectIdx, i])
for c in range(numColumns):
numL2ActiveCells[objectIdx, sensationNumber] += len(
exp.getL2Representations()[c])
print "{} # L2 active cells {}: ".format(sensationNumber,
numL2ActiveCells[
objectIdx, sensationNumber])
if verbose:
print "Output for {}: {}".format(objectId, exp.getL2Representations())
print "Final L2 active cells {}: ".format(
numL2ActiveCells[objectIdx, sensationNumber])
print
exp.sendReset()
expResult = {'overlapMat': overlapMat,
'numL2ActiveCells': numL2ActiveCells}
return expResult
def computeAccuracy(expResult, objects):
objectNames = objects.objects.keys()
overlapMat = expResult['overlapMat'][:, :, -1]
numL2ActiveCells = expResult['numL2ActiveCells'][:, -1]
numCorrect = 0
numObjects = overlapMat.shape[0]
numFound = 0
percentOverlap = np.zeros(overlapMat.shape)
for i in range(numObjects):
for j in range(i, numObjects):
percentOverlap[i, j] = overlapMat[i, j] # / np.min([numL2ActiveCells[i], numL2ActiveCells[j]])
objectNames = np.array(objectNames)
for i in range(numObjects):
# idx = np.where(overlapMat[i, :]>confuseThresh)[0]
idx = np.where(percentOverlap[i, :] == np.max(percentOverlap[i, :]))[0]
print " {}, # sensations {}, best match is {}".format(
objectNames[i], len(objects[objectNames[i]]), objectNames[idx])
found = len(np.where(idx == i)[0]) > 0
numFound += found
if not found:
print "<=========== {} was not detected ! ===========>".format(objectNames[i])
if len(idx) > 1:
continue
if idx[0] == i:
numCorrect += 1
accuracy = float(numCorrect)/numObjects
numPerfect = len(np.where(numL2ActiveCells<=40)[0])
print "accuracy: {} ({}/{}) ".format(accuracy, numCorrect, numObjects)
print "perfect retrival ratio: {} ({}/{}) ".format(
float(numPerfect)/numObjects, numPerfect, numObjects)
print "Object detection ratio {}/{} ".format(numFound, numObjects)
return accuracy
def runExperimentAccuracyVsL4Thresh():
accuracyVsThresh = []
threshList = np.arange(13, 20)
for thresh in threshList:
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
l4Params['minThreshold'] = thresh
l4Params['activationThreshold'] = thresh
objects = loadThingObjects(1, './data')
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
accuracyVsThresh.append(accuracy)
plt.figure()
plt.plot(threshList, accuracyVsThresh, '-o')
plt.xlabel('L4 distal Threshold')
plt.ylabel('Classification Accuracy')
plt.savefig('accuracyVsL4Thresh.pdf')
return threshList, accuracyVsThresh
if __name__ == "__main__":
# uncomment to plot accuracy as a function of L4 threshold
# threshList, accuracyVsThresh = runExperimentAccuracyVsL4Thresh()
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
verbose = 1
objects, OnBitsList = loadThingObjects(numColumns, './data')
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
objectNames = objects.objects.keys()
numObjects = len(objectNames)
overlapMat = expResult['overlapMat']
numL2ActiveCells = expResult['numL2ActiveCells']
objectNames = objects.objects.keys()
numObjects = len(objectNames)
plt.figure()
for sensationNumber in range(10):
plt.imshow(overlapMat[:, :, sensationNumber])
plt.xticks(range(numObjects), objectNames, rotation='vertical', fontsize=4)
plt.yticks(range(numObjects), objectNames, fontsize=4)
plt.title('pairwise overlap at step {}'.format(sensationNumber))
plt.xlabel('target representation')
plt.ylabel('inferred representation')
plt.tight_layout()
plt.savefig('plots/overlap_matrix_step_{}.png'.format(sensationNumber))
# plot number of active cells for each object
plt.figure()
objectNamesSort = []
idx = np.argsort(expResult['numL2ActiveCells'][:, -1])
for i in idx:
objectNamesSort.append(objectNames[i])
plt.plot(numL2ActiveCells[idx, -1])
plt.xticks(range(numObjects), objectNamesSort, rotation='vertical', fontsize=5)
plt.tight_layout()
plt.ylabel('Number of active L2 cells')
plt.savefig('plots/number_of_active_l2_cells.pdf')
#
| agpl-3.0 |
rahul-c1/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
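# i.e. three well-separated blobs centered near (11, 11), (9, 9) and (11, 9)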
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/computation/eval.py | 7 | 10856 | #!/usr/bin/env python
"""Top level ``eval`` module.
"""
import warnings
import tokenize
from pandas.io.formats.printing import pprint_thing
from pandas.core.computation import _NUMEXPR_INSTALLED
from pandas.core.computation.expr import Expr, _parsers, tokenize_string
from pandas.core.computation.scope import _ensure_scope
from pandas.compat import string_types
from pandas.core.computation.engines import _engines
from pandas.util._validators import validate_bool_kwarg
def _check_engine(engine):
"""Make sure a valid engine is passed.
Parameters
----------
engine : str
Raises
------
KeyError
* If an invalid engine is passed
ImportError
* If numexpr was requested but doesn't exist
Returns
-------
string engine
"""
if engine is None:
if _NUMEXPR_INSTALLED:
engine = 'numexpr'
else:
engine = 'python'
if engine not in _engines:
raise KeyError('Invalid engine {0!r} passed, valid engines are'
' {1}'.format(engine, list(_engines.keys())))
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == 'numexpr':
if not _NUMEXPR_INSTALLED:
raise ImportError("'numexpr' is not installed or an "
"unsupported version. Cannot use "
"engine='numexpr' for query/eval "
"if 'numexpr' is not installed")
return engine
def _check_parser(parser):
"""Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
"""
if parser not in _parsers:
raise KeyError('Invalid parser {0!r} passed, valid parsers are'
' {1}'.format(parser, _parsers.keys()))
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, '__getitem__'):
name = type(resolver).__name__
raise TypeError('Resolver of type %r does not implement '
'the __getitem__ method' % name)
def _check_expression(expr):
"""Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
"""Convert an object to an expression.
    This function converts an object to an expression (a unicode string) and
checks to make sure it isn't empty after conversion. This is used to
convert operators to their string representation for recursive calls to
:func:`~pandas.eval`.
Parameters
----------
expr : object
The object to be converted to a string.
Returns
-------
s : unicode
The string representation of an object.
Raises
------
ValueError
* If the expression is empty.
"""
s = pprint_thing(expr)
_check_expression(s)
return s
def _check_for_locals(expr, stack_level, parser):
at_top_of_stack = stack_level == 0
not_pandas_parser = parser != 'pandas'
if not_pandas_parser:
msg = "The '@' prefix is only supported by the pandas parser"
elif at_top_of_stack:
msg = ("The '@' prefix is not allowed in "
"top-level eval calls, \nplease refer to "
"your variables by name without the '@' "
"prefix")
if at_top_of_stack or not_pandas_parser:
for toknum, tokval in tokenize_string(expr):
if toknum == tokenize.OP and tokval == '@':
raise SyntaxError(msg)
def eval(expr, parser='pandas', engine=None, truediv=True,
local_dict=None, global_dict=None, resolvers=(), level=0,
target=None, inplace=None):
"""Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
default of ``'pandas'`` parses code slightly different than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string or None, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~pandas.DataFrame.query` method to inject the
:attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : a target object for assignment, optional, default is None
        Essentially this is a passed-in resolver.
inplace : bool, default True
If expression mutates, whether to modify object inplace or return
copy with mutation.
        WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Notes
-----
The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.eval
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
first_expr = True
if isinstance(expr, string_types):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != '']
else:
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError("multi-line expressions are only valid in the "
"context of data, use DataFrame.eval")
first_expr = True
for expr in exprs:
expr = _convert_expression(expr)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = _ensure_scope(level + 1, global_dict=global_dict,
local_dict=local_dict, resolvers=resolvers,
target=target)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None and multi_line:
raise ValueError("Multi-line expressions are only valid"
" if all expressions contain an assignment")
# assign if needed
if env.target is not None and parsed_expr.assigner is not None:
if inplace is None:
warnings.warn(
"eval expressions containing an assignment currently"
"default to operating inplace.\nThis will change in "
"a future version of pandas, use inplace=True to "
"avoid this warning.",
FutureWarning, stacklevel=3)
inplace = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
target = env.target.copy()
else:
target = env.target
target[parsed_expr.assigner] = ret
if not resolvers:
resolvers = ({parsed_expr.assigner: ret},)
else:
                # existing resolver needs to be updated to handle
# case of mutating existing column in copy
for resolver in resolvers:
if parsed_expr.assigner in resolver:
resolver[parsed_expr.assigner] = ret
break
else:
resolvers += ({parsed_expr.assigner: ret},)
ret = None
first_expr = False
if not inplace and inplace is not None:
return target
return ret
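if __name__ == "__main__":
    # Editor's sketch (not part of the original module): pandas.eval and
    # DataFrame.eval both funnel into the eval() defined above. Assumes a
    # pandas install matching this module; numexpr is optional (the engine
    # falls back to 'python' when it is missing).
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    print(df.eval('a + 2 * b').tolist())  # -> [9, 12, 15]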
| mit |
Obus/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
cython-testbed/pandas | pandas/core/apply.py | 4 | 12744 | import warnings
import numpy as np
from pandas import compat
from pandas._libs import reduction
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
is_dict_like,
is_list_like,
is_sequence)
from pandas.util._decorators import cache_readonly
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, 'reduce', 'broadcast', 'expand']:
raise ValueError("invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}")
if broadcast is not None:
warnings.warn("The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if broadcast:
result_type = 'broadcast'
if reduce is not None:
warnings.warn("The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if reduce:
if result_type is not None:
raise ValueError(
"cannot pass both reduce=True and result_type")
result_type = 'reduce'
self.result_type = result_type
# curry if needed
if ((kwds or args) and
not isinstance(func, (np.ufunc, compat.string_types))):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, compat.string_types):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = compat.signature(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.f(self.values)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result,
index=self.index,
columns=self.columns)
else:
return self.obj._constructor_sliced(result,
index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(result_values,
index=target.index,
columns=target.columns)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
        # e.g. if we want to apply to a SparseFrame, then we can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (self.result_type in ['reduce', None] and
not self.dtypes.apply(is_extension_type).any()):
# Create a dummy Series from an empty array
from pandas import Series
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
result = reduction.reduce(values, self.f,
axis=self.axis,
dummy=dummy,
labels=labels)
return self.obj._constructor_sliced(result, index=labels)
except Exception:
pass
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super(FrameRowApply, self).apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1)
for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
try:
result.index = self.res_columns
except ValueError:
pass
try:
result.columns = self.res_index
except ValueError:
pass
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super(FrameColumnApply, self).apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values,
self.index)))
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == 'expand':
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
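if __name__ == "__main__":
    # Editor's sketch (not part of the original module): frame_apply is the
    # machinery behind DataFrame.apply, which the public API reaches like
    # this. Assumes a pandas version that ships this module.
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    print(df.apply(np.sum, axis=0).tolist())  # column sums -> [6, 15]
    print(df.apply(lambda row: row['a'] + row['b'], axis=1).tolist())  # [5, 7, 9]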
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/datasets/_openml.py | 2 | 34451 | import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ['fetch_openml']
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', openml_path + ".gz")
def _retry_with_clean_cache(
openml_path: str, data_home: Optional[str]
) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
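# Illustrative (editor's note): the decorator wraps the fetch helpers used
# further down in this module, e.g.
#     _retry_with_clean_cache(url, data_home)(_load_arff_response)(...)
# On any non-HTTPError failure it deletes the stale cache file and calls
# the wrapped function once more.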
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
OpenML URL that will be accessed. This will be prefixes with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + openml_path)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode='rb')
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, 'rb')
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str,
error_message: Optional[str],
data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff['data']
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError(
"shape must be provided when arr['data'] is a Generator"
)
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(itertools.chain.from_iterable(arff_data),
dtype='float64', count=count)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape, dtype=np.float64)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError('Unexpected Data Type obtained from arff.')
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame
"""
if feature['data_type'] == 'string':
return object
elif feature['data_type'] == 'nominal':
return 'category'
# only numeric, integer, real are left
elif (feature['number_of_missing_values'] != '0' or
feature['data_type'] in ['numeric', 'real']):
# cast to floats when there are any missing values
return np.float64
elif feature['data_type'] == 'integer':
return np.int64
raise ValueError('Unsupported feature: {}'.format(feature))
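# Summary of the mapping above (editor's note): 'string' -> object,
# 'nominal' -> 'category', missing values or 'numeric'/'real' -> np.float64,
# 'integer' with no missing values -> np.int64; anything else raises.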
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support('fetch_openml with as_frame=True')
attributes = OrderedDict(arff['attributes'])
arff_columns = list(attributes)
if not isinstance(arff['data'], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff['data'])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff['data'], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == 'category':
cats_without_missing = [cat for cat in attributes[column]
if cat is not None and
not is_scalar_nan(cat)]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df, )
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        json representation of the first dataset object that adhered to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data['data']['dataset']
if len(res) > 1:
warn("Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]['version']))
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name,
version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data['data']['dataset'][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_set_description']
def _get_data_features(
data_id: int, data_home: Optional[str]
) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_features']['feature']
def _get_data_qualities(
data_id: int, data_home: Optional[str]
) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get('data_qualities', {}).get('quality', [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d['name']: d['value'] for d in data_qualities}
return int(float(qualities.get('NumberOfInstances', default_n_samples)))
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type, encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode('utf-8')
stream = _stream_checksum_generator(response)
arff = _arff.load(stream,
return_type=return_type,
encode_nominal=encode_nominal)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError("md5 checksum of local file for " + url +
" does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry...")
return parsed_arff
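# The md5-while-streaming pattern used above, shown in isolation as a
# minimal sketch over an in-memory byte stream (the ARFF payload is made up):
#   >>> import hashlib
#   >>> from io import BytesIO
#   >>> md5 = hashlib.md5()
#   >>> def checked(stream):
#   ...     for line in stream:
#   ...         md5.update(line)
#   ...         yield line.decode('utf-8')
#   >>> payload = b"@RELATION demo\n@DATA\n1.0\n"
#   >>> text = ''.join(checked(BytesIO(payload)))
#   >>> md5.hexdigest() == hashlib.md5(payload).hexdigest()
#   True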
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str
):
"""Download OpenML ARFF and convert to Bunch of data
"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature['name']: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]['index'])
for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]['index'])
for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat['number_of_missing_values'])
if nr_missing > 0:
raise ValueError('Target column {} has {} missing values. '
'Missing values are not supported for target '
'columns. '.format(feat['name'], nr_missing))
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(_convert_arff_data_dataframe, columns=columns,
features_dict=features_dict)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
            # nominal_attributes is a dict mapping from the attribute name to
            # the possible values. It also includes the target columns (which
            # will be popped off below, before the data is packed into the
            # Bunch object)
nominal_attributes = {k: v for k, v in arff['attributes']
if isinstance(v, list) and
k in data_columns + target_columns}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {col_name in nominal_attributes
for col_name in target_columns}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack([
np.take(
np.asarray(nominal_attributes.pop(col_name),
dtype='O'),
y[:, i:i + 1].astype(int, copy=False))
for i, col_name in enumerate(target_columns)
])
elif any(is_classification):
raise ValueError('Mix of nominal and non-nominal targets is '
'not currently supported')
            # reshape y back to a 1-D array if there is only 1 target column;
            # set it back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(
_load_arff_response)(url, data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(data=X, target=y, frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns)
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
raise ValueError('target_column should be list, '
'got: %s' % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError('Could not find target_column={}'
                           .format(target_column))
if features_dict[target_column]['data_type'] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]['is_ignore'] == 'true':
warn('target_column={} has flag is_ignore.'.format(
target_column))
if features_dict[target_column]['is_row_identifier'] == 'true':
warn('target_column={} has flag is_row_identifier.'.format(
target_column))
if len(found_types) > 1:
raise ValueError('Can only handle homogeneous multi-target datasets, '
'i.e., all targets are either numeric or '
'categorical.')
def _valid_data_column_names(features_list, target_columns):
    # logic for determining which columns can be learned on. Note that per
    # the OpenML guide, columns that have the `is_row_identifier` or
    # `is_ignore` flag cannot be learned on. Target columns are also
    # excluded.
valid_data_column_names = []
for feature in features_list:
if (feature['name'] not in target_columns
and feature['is_ignore'] != 'true'
and feature['is_row_identifier'] != 'true'):
valid_data_column_names.append(feature['name'])
return valid_data_column_names
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = 'active',
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = 'default-target',
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = 'auto'
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'openml')
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name))
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info['did']
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version))
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or "
"data_id.")
data_description = _get_data_description_by_id(data_id, data_home)
if data_description['status'] != "active":
warn("Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description['version'],
data_description['name'],
data_description['url']))
if 'error' in data_description:
warn("OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description['error']))
if 'warning' in data_description:
warn("OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description['warning']))
return_sparse = False
if data_description['format'].lower() == 'sparse_arff':
return_sparse = True
if as_frame == 'auto':
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError('Cannot return dataframe with sparse data')
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if 'true' in (feature['is_ignore'], feature['is_row_identifier']):
continue
if feature['data_type'] == 'string':
raise ValueError('STRING attributes are not supported for '
'array representation. Try as_frame=True')
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [feature['name'] for feature in features_list
if feature['is_target'] == 'true']
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError("Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column)))
data_columns = _valid_data_column_names(features_list,
target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description['file_id'])
bunch = _download_data_to_bunch(url, return_sparse, data_home,
as_frame=bool(as_frame),
features_list=features_list, shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description[
"md5_checksum"])
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop('description'))
bunch.update(
DESCR=description, details=data_description,
url="https://www.openml.org/d/{}".format(data_id))
return bunch
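# A hedged usage sketch of fetch_openml; 'iris'/version 1 and data_id 61 are
# illustrative, and actual results depend on the OpenML server:
#   >>> bunch = fetch_openml(name='iris', version=1)        # doctest: +SKIP
#   >>> X, y = fetch_openml(data_id=61, return_X_y=True)    # doctest: +SKIP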
| bsd-3-clause |
ZENGXH/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
BhallaLab/moose-full | moose-examples/snippets/switchKineticSolvers.py | 2 | 5089 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import pylab
import numpy
import matplotlib.pyplot as plt
import sys
def runAndSavePlots( name ):
runtime = 20.0
moose.reinit()
moose.start( runtime )
pa = moose.Neutral( '/model/graphs/' + name )
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
if ( x.tick != -1 ):
tabname = '/model/graphs/' + name + '/' + x.name + '.' + name
y = moose.Table( tabname )
y.vector = x.vector
y.tick = -1
# Takes args ee, gsl, or gssa
def switchSolvers( solver ):
    if ( moose.exists( '/model/kinetics/stoich' ) ):
moose.delete( '/model/kinetics/stoich' )
moose.delete( '/model/kinetics/ksolve' )
compt = moose.element( '/model/kinetics' )
if ( solver == 'gsl' ):
ksolve = moose.Ksolve( '/model/kinetics/ksolve' )
if ( solver == 'gssa' ):
ksolve = moose.Gsolve( '/model/kinetics/ksolve' )
if ( solver != 'ee' ):
stoich = moose.Stoich( '/model/kinetics/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = "/model/kinetics/##"
def main():
"""
At zero order, you can select the solver you want to use within the
function moose.loadModel( filename, modelpath, solver ).
Having loaded in the model, you can change the solver to use on it.
This example illustrates how to assign and change solvers for a
kinetic model. This process is necessary in two situations:
* If we want to change the numerical method employed, for example,
from deterministic to stochastic.
* If we are already using a solver, and we have changed the reaction
network by adding or removing molecules or reactions.
Note that we do not have to change the solvers if the volume or
reaction rates change.
In this example the model is loaded in with a gsl solver. The
sequence of solver calculations is:
#. gsl
#. ee
#. gsl
#. gssa
#. gsl
If you're removing the solvers, you just delete the stoichiometry
object and the associated ksolve/gsolve. Should there be diffusion
    (a dsolve) then you should delete that too. If you're
building the solvers up again, then you must do the following
steps in order:
#. build up the ksolve/gsolve and stoich (any order)
#. Assign stoich.ksolve
#. Assign stoich.path.
See the Reaction-diffusion section should you want to do diffusion
as well.
"""
solver = "gsl" # Pick any of gsl, gssa, ee..
mfile = '../genesis/kkit_objects_example.g'
modelId = moose.loadModel( mfile, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
compt = moose.element( '/model/kinetics' )
compt.volume = 1e-19
runAndSavePlots( 'gsl' )
#########################################################
switchSolvers( 'ee' )
runAndSavePlots( 'ee' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl2' )
#########################################################
switchSolvers( 'gssa' )
runAndSavePlots( 'gssa' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl3' )
#########################################################
# Display all plots.
fig = plt.figure( figsize = (12, 10) )
orig = fig.add_subplot( 511 )
gsl = fig.add_subplot( 512 )
ee = fig.add_subplot( 513 )
gsl2 = fig.add_subplot( 514 )
gssa = fig.add_subplot( 515 )
plotdt = moose.element( '/clock' ).tickDt[18]
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
orig.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gsl/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gsl.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/ee/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
ee.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gsl2/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gsl2.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gssa/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gssa.plot( t, x.vector, label=x.name )
plt.legend()
pylab.show()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
jstoxrocky/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft; with all the lfilter calls it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately and use the ratio; check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
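    # Doctest-style illustration of the padding helper (values illustrative):
    #   >>> proc = ArmaFft([1, -0.5], [1., 0.4], 10)
    #   >>> proc.padarr(np.array([1., 2.]), 4)
    #   array([ 1.,  2.,  0.,  0.])
    #   >>> proc.padarr(np.array([1., 2.]), 4, atend=False)
    #   array([ 0.,  0.,  1.,  2.])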
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
            extended MA polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
        fftma : ndarray
            fft of zero-padded ma polynomial
'''
if n is None:
            n = len(self.ma)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
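    # The ratio above is the ARMA transfer function evaluated on the fft
    # frequency grid, H(w) = ma(exp(-iw)) / ar(exp(-iw)); filtering by the
    # ARMA process then amounts to pointwise multiplication by H(w) in the
    # frequency domain (see the filter method below).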
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
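        # for a root r (inverted below), |1 - r*exp(-iw)|**2
        #   = 1 + r**2 - 2*r*cos(w)
        # so spd = (1/(2*pi)) * prod_j(1 + m_j**2 - 2*m_j*cos(w))
        #                     / prod_k(1 + a_k**2 - 2*a_k*cos(w))
        # with m_j, a_k the inverted MA and AR roots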
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
        if n == self.nobs:
            fftarma = self.fftarma(n)
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
        this is also similarly used in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
            w = np.linspace(0, np.pi, 100)
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
        rvs = self.generate_sample(nsample=nobs, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
        ax.set_title('Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
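# Quick numeric check of the AR(1) density above: at w = 0 with rho = 0.5,
#   spdar1(0.5, 0.)  ->  0.5 / pi / (1 + 0.25 - 1.0)  =  2 / pi  ~  0.6366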
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print(marep[:10])
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))
arrep = signal.lfilter([1],marep, uni)
print(arrep[:20]) # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print(x[:5])
print(rvs[:5])
print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print(y2[:10])
print(y[:10])
print(maxabs(y, y2)) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
    arma1 = ArmaFft([1, -0.5, 0, 0, 0, 0, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print(spd1.shape)
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print(spd2.shape)
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print(spd3.shape)
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print(spd3b.shape)
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print(spdr.shape)
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print(spdar1_.shape)
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print(spdper.shape)
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
    sdm, wm = mlb.psd(rvs)
print('sdm.shape', sdm.shape)
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print('spdnt.shape', spdnt.shape)
_ = plt.plot(spdnt.ravel())
print(spdnt[:10])
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
hainm/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
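# For example, a delta of 1 second and 500000 microseconds maps to 1.5 s
# (note that t_start is unused by the computation itself):
#   >>> from datetime import timedelta
#   >>> compute_time(None, timedelta(seconds=1, microseconds=500000))
#   1.5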
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
dingmingliu/quanttrade | bt/core.py | 1 | 37660 | """
Contains the core building blocks of the framework.
"""
import math
from copy import deepcopy
import pandas as pd
import numpy as np
import cython as cy
class Node(object):
"""
The Node is the main building block in bt's tree structure design.
Both StrategyBase and SecurityBase inherit Node. It contains the
core functionality of a tree node.
Args:
* name (str): The Node name
* parent (Node): The parent Node
* children (dict, list): A collection of children. If dict,
the format is {name: child}, if list then list of children.
Attributes:
* name (str): Node name
* parent (Node): Node parent
* root (Node): Root node of the tree (topmost node)
* children (dict): Node's children
* now (datetime): Used when backtesting to store current date
    * stale (bool): Flag used to determine if Node is stale and needs
      updating
* prices (TimeSeries): Prices of the Node. Prices for a security will
be the security's price, for a strategy it will be an index that
reflects the value of the strategy over time.
* price (float): last price
* value (float): last value
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Node + node's children
"""
_price = cy.declare(cy.double)
_value = cy.declare(cy.double)
_weight = cy.declare(cy.double)
_issec = cy.declare(cy.bint)
_has_strat_children = cy.declare(cy.bint)
def __init__(self, name, parent=None, children=None):
self.name = name
# strategy children helpers
self._has_strat_children = False
self._strat_children = []
# if children is not None, we assume that we want to limit the
# available children space to the provided list.
if children is not None:
if isinstance(children, list):
# if all strings - just save as universe_filter
if all(isinstance(x, str) for x in children):
self._universe_tickers = children
# empty dict - don't want to uselessly create
# tons of children when they might not be needed
children = {}
else:
                    # this will be the case if we pass in children
# (say a bunch of sub-strategies)
tmp = {}
ut = []
for c in children:
if type(c) == str:
tmp[c] = SecurityBase(c)
ut.append(c)
else:
# deepcopy object for possible later reuse
tmp[c.name] = deepcopy(c)
# if strategy, turn on flag and add name to list
# strategy children have special treatment
if isinstance(c, StrategyBase):
self._has_strat_children = True
self._strat_children.append(c.name)
# if not strategy, then we will want to add this to
# universe_tickers to filter on setup
else:
ut.append(c.name)
children = tmp
                    # keep the tickers gathered from the children so the
                    # universe can later be filtered down to them on setup
                    self._universe_tickers = ut
if parent is None:
self.parent = self
self.root = self
else:
self.parent = parent
self.root = parent.root
parent._add_child(self)
# default children
if children is None:
children = {}
self._universe_tickers = None
self.children = children
self._childrenv = children.values()
for c in self._childrenv:
c.parent = self
c.root = self.root
# set default value for now
self.now = 0
# make sure root has stale flag
        # used to avoid unnecessary updates
        # sometimes we change values in the tree and we know that we will need
        # to update if another node tries to access a given value (say weight).
        # This avoids calling the update until it is actually needed.
self.root.stale = False
# helper vars
self._price = 0
self._value = 0
self._weight = 0
# is security flag - used to avoid updating 0 pos securities
self._issec = False
def __getitem__(self, key):
return self.children[key]
@property
def prices(self):
"""
A TimeSeries of the Node's price.
"""
# can optimize depending on type -
# securities don't need to check stale to
# return latest prices, whereas strategies do...
raise NotImplementedError()
@property
def price(self):
"""
Current price of the Node
"""
# can optimize depending on type -
# securities don't need to check stale to
# return latest prices, whereas strategies do...
raise NotImplementedError()
@property
def value(self):
"""
Current value of the Node
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._value
@property
def weight(self):
"""
Current weight of the Node (with respect to the parent).
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._weight
def setup(self, dates):
"""
Setup method used to initialize a Node with a set of dates.
"""
raise NotImplementedError()
def _add_child(self, child):
child.parent = self
child.root = self.root
if self.children is None:
self.children = {child.name: child}
else:
self.children[child.name] = child
self._childrenv = self.children.values()
def update(self, date, data=None, inow=None):
"""
Update Node with latest date, and optionally some data.
"""
raise NotImplementedError()
def adjust(self, amount, update=True, isflow=True):
"""
Adjust Node value by amount.
"""
raise NotImplementedError()
def allocate(self, amount, update=True):
"""
Allocate capital to Node.
"""
raise NotImplementedError()
@property
def members(self):
"""
Node members. Members include current node as well as Node's
children.
"""
res = [self]
for c in self.children.values():
res.extend(c.members)
return res
@property
def full_name(self):
if self.parent == self:
return self.name
else:
return '%s>%s' % (self.parent.full_name, self.name)
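# A minimal sketch of the parent/child wiring and naming above (names are
# illustrative; Node is normally used via the StrategyBase/SecurityBase
# subclasses below):
#   >>> parent = Node('parent')
#   >>> child = Node('child', parent=parent)
#   >>> child.full_name
#   'parent>child'
#   >>> parent.members == [parent, child]
#   True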
class StrategyBase(Node):
"""
Strategy Node. Used to define strategy logic within a tree.
    A Strategy's role is to allocate capital to its children
based on a function.
Args:
* name (str): Strategy name
* children (dict, list): A collection of children. If dict,
the format is {name: child}, if list then list of children.
Children can be any type of Node.
* parent (Node): The parent Node
Attributes:
* name (str): Strategy name
* parent (Strategy): Strategy parent
* root (Strategy): Root node of the tree (topmost node)
* children (dict): Strategy's children
* now (datetime): Used when backtesting to store current date
    * stale (bool): Flag used to determine if Strategy is stale and needs
      updating
* prices (TimeSeries): Prices of the Strategy - basically an index that
reflects the value of the strategy over time.
* price (float): last price
* value (float): last value
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Strategy + strategy's children
* commission_fn (fn(quantity, price)): A function used to determine the
commission (transaction fee) amount. Could be used to model slippage
(implementation shortfall). Note that often fees are symmetric for
buy and sell and absolute value of quantity should be used for
calculation.
* capital (float): Capital amount in Strategy - cash
* universe (DataFrame): Data universe available at the current time.
Universe contains the data passed in when creating a Backtest. Use
this data to determine strategy logic.
"""
_capital = cy.declare(cy.double)
_net_flows = cy.declare(cy.double)
_last_value = cy.declare(cy.double)
_last_price = cy.declare(cy.double)
_last_fee = cy.declare(cy.double)
_paper_trade = cy.declare(cy.bint)
bankrupt = cy.declare(cy.bint)
def __init__(self, name, children=None, parent=None):
Node.__init__(self, name, children=children, parent=parent)
self._capital = 0
self._weight = 1
self._value = 0
self._price = 100
# helper vars
self._net_flows = 0
self._last_value = 0
self._last_price = 100
self._last_fee = 0
# default commission function
self.commission_fn = self._dflt_comm_fn
self._paper_trade = False
self._positions = None
self.bankrupt = False
@property
def price(self):
"""
Current price.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._price
@property
def prices(self):
"""
TimeSeries of prices.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._prices.ix[:self.now]
@property
def values(self):
"""
TimeSeries of values.
"""
if self.root.stale:
self.root.update(self.now, None)
return self._values.ix[:self.now]
@property
def capital(self):
"""
Current capital - amount of unallocated capital left in strategy.
"""
# no stale check needed
return self._capital
@property
def cash(self):
"""
TimeSeries of unallocated capital.
"""
# no stale check needed
return self._cash
@property
def fees(self):
"""
TimeSeries of fees.
"""
# no stale check needed
return self._fees
@property
def universe(self):
"""
Data universe available at the current time.
Universe contains the data passed in when creating a Backtest.
Use this data to determine strategy logic.
"""
# avoid windowing every time
# if calling and on same date return
# cached value
if self.now == self._last_chk:
return self._funiverse
else:
self._last_chk = self.now
self._funiverse = self._universe.ix[:self.now]
return self._funiverse
@property
def positions(self):
"""
TimeSeries of positions.
"""
# if accessing and stale - update first
if self.root.stale:
self.root.update(self.root.now, None)
if self._positions is not None:
return self._positions
else:
vals = pd.DataFrame({x.name: x.positions for x in self.members
if isinstance(x, SecurityBase)})
self._positions = vals
return vals
def setup(self, universe):
"""
Setup strategy with universe. This will speed up future calculations
and updates.
"""
# save full universe in case we need it
self._original_data = universe
# determine if needs paper trading
# and setup if so
if self is not self.parent:
self._paper_trade = True
self._paper_amount = 1000000
paper = deepcopy(self)
paper.parent = paper
paper.root = paper
paper._paper_trade = False
paper.setup(self._original_data)
paper.adjust(self._paper_amount)
self._paper = paper
# setup universe
funiverse = universe
if self._universe_tickers is not None:
# if we have universe_tickers defined, limit universe to
# those tickers
valid_filter = list(set(universe.columns)
.intersection(self._universe_tickers))
funiverse = universe[valid_filter].copy()
# if we have strat children, we will need to create their columns
# in the new universe
if self._has_strat_children:
for c in self._strat_children:
funiverse[c] = np.nan
# must create to avoid pandas warning
funiverse = pd.DataFrame(funiverse)
self._universe = funiverse
# holds filtered universe
self._funiverse = funiverse
self._last_chk = None
# We're not bankrupt yet
self.bankrupt = False
# setup internal data
self.data = pd.DataFrame(index=funiverse.index,
columns=['price', 'value', 'cash', 'fees'],
data=0.0)
self._prices = self.data['price']
self._values = self.data['value']
self._cash = self.data['cash']
self._fees = self.data['fees']
# setup children as well - use original universe here - don't want to
# pollute with potential strategy children in funiverse
if self.children is not None:
[c.setup(universe) for c in self._childrenv]
@cy.locals(newpt=cy.bint, val=cy.double, ret=cy.double)
def update(self, date, data=None, inow=None):
"""
Update strategy. Updates prices, values, weight, etc.
"""
# resolve stale state
self.root.stale = False
# update helpers on date change
# also set newpt flag
newpt = False
if self.now == 0:
newpt = True
elif date != self.now:
self._net_flows = 0
self._last_price = self._price
self._last_value = self._value
self._last_fee = 0.0
newpt = True
# update now
self.now = date
if inow is None:
if self.now == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# update children if any and calculate value
val = self._capital # default if no children
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
c.update(date, data, inow)
val += c.value
if self.root == self:
if (val < 0) and not self.bankrupt:
# Declare a bankruptcy
self.bankrupt = True
self.flatten()
# update data if this value is different or
# if now has changed - avoid all this if not since it
# won't change
if newpt or self._value != val:
self._value = val
self._values.values[inow] = val
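            # flow-adjusted return: net capital flows are added to the base
            # so that injections/withdrawals do not register as performance,
            # e.g. last value 100, +10 injected, current value 112
            #   -> ret = 112 / (100 + 10) - 1 ~ 0.018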
try:
ret = self._value / (self._last_value
+ self._net_flows) - 1
except ZeroDivisionError:
if self._value == 0:
ret = 0
else:
raise ZeroDivisionError(
'Could not update %s. Last value '
                        'was %s and net flows were %s. Current '
'value is %s. Therefore, '
'we are dividing by zero to obtain the return '
'for the period.' % (self.name,
self._last_value,
self._net_flows,
self._value))
self._price = self._last_price * (1 + ret)
self._prices.values[inow] = self._price
# update children weights
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
try:
c._weight = c.value / val
except ZeroDivisionError:
c._weight = 0.0
# if we have strategy children, we will need to update them in universe
if self._has_strat_children:
for c in self._strat_children:
# TODO: optimize ".loc" here as well
self._universe.loc[date, c] = self.children[c].price
# Cash should track the unallocated capital at the end of the day, so
# we should update it every time we call "update".
# Same for fees
self._cash.values[inow] = self._capital
self._fees.values[inow] = self._last_fee
# update paper trade if necessary
if newpt and self._paper_trade:
self._paper.update(date)
self._paper.run()
self._paper.update(date)
# update price
self._price = self._paper.price
self._prices.values[inow] = self._price
@cy.locals(amount=cy.double, update=cy.bint, flow=cy.bint, fees=cy.double)
def adjust(self, amount, update=True, flow=True, fee=0.0):
"""
Adjust capital - used to inject capital to a Strategy. This injection
of capital will have no effect on the children.
Args:
* amount (float): Amount to adjust by.
* update (bool): Force update?
        * flow (bool): Is this adjustment a flow? A flow will not have an
            impact on the performance (price index). Examples of flows are
            capital injections and withdrawals.
"""
# adjust capital
self._capital += amount
self._last_fee += fee
# if flow - increment net_flows - this will not affect
# performance. Commissions and other fees are not flows since
# they have a performance impact
if flow:
self._net_flows += amount
if update:
# indicates that data is now stale and must
# be updated before access
self.root.stale = True
@cy.locals(amount=cy.double, update=cy.bint)
def allocate(self, amount, child=None, update=True):
"""
Allocate capital to Strategy. By default, capital is allocated
recursively down the children, proportionally to the children's
weights. If a child is specified, capital will be allocated
to that specific child.
        Allocations also have a side-effect: they will deduct the same amount
from the parent's "account" to offset the allocation. If there is
remaining capital after allocation, it will remain in Strategy.
Args:
* amount (float): Amount to allocate.
* child (str): If specified, allocation will be directed to child
only. Specified by name.
* update (bool): Force update.
"""
# allocate to child
if child is not None:
if child not in self.children:
c = SecurityBase(child)
c.setup(self._universe)
# update to bring up to speed
c.update(self.now)
# add child to tree
self._add_child(c)
# allocate to child
self.children[child].allocate(amount)
# allocate to self
else:
# adjust parent's capital
# no need to update now - avoids repetition
if self.parent == self:
self.parent.adjust(-amount, update=False, flow=True)
else:
# do NOT set as flow - parent will be another strategy
# and therefore should not incur flow
self.parent.adjust(-amount, update=False, flow=False)
# adjust self's capital
self.adjust(amount, update=False, flow=True)
# push allocation down to children if any
# use _weight to avoid triggering an update
if self.children is not None:
[c.allocate(amount * c._weight, update=False)
for c in self._childrenv]
# mark as stale if update requested
if update:
self.root.stale = True
@cy.locals(delta=cy.double, weight=cy.double, base=cy.double)
def rebalance(self, weight, child, base=np.nan, update=True):
"""
Rebalance a child to a given weight.
This is a helper method to simplify code logic. This method is used
        when we want to set the weight of a particular child to a given amount.
It is similar to allocate, but it calculates the appropriate allocation
based on the current weight.
Args:
* weight (float): The target weight. Usually between -1.0 and 1.0.
* child (str): child to allocate to - specified by name.
* base (float): If specified, this is the base amount all weight
delta calculations will be based off of. This is useful when we
determine a set of weights and want to rebalance each child
given these new weights. However, as we iterate through each
child and call this method, the base (which is by default the
current value) will change. Therefore, we can set this base to
the original value before the iteration to ensure the proper
allocations are made.
* update (bool): Force update?
"""
# if weight is 0 - we want to close child
if weight == 0:
if child in self.children:
return self.close(child)
else:
return
# if no base specified use self's value
if np.isnan(base):
base = self.value
# else make sure we have child
if child not in self.children:
c = SecurityBase(child)
c.setup(self._universe)
# update child to bring up to speed
c.update(self.now)
self._add_child(c)
# allocate to child
# figure out weight delta
c = self.children[child]
delta = weight - c.weight
c.allocate(delta * base)
def close(self, child):
"""
Close a child position - alias for rebalance(0, child). This will also
flatten (close out all) the child's children.
Args:
* child (str): Child, specified by name.
"""
c = self.children[child]
# flatten if children not None
if c.children is not None and len(c.children) != 0:
c.flatten()
c.allocate(-c.value)
def flatten(self):
"""
Close all child positions.
"""
# go right to base alloc
[c.allocate(-c.value) for c in self._childrenv if c.value != 0]
def run(self):
"""
This is the main logic method. Override this method to provide the
algorithm to execute on each date change. This method is called by
the backtester.
"""
pass
def set_commissions(self, fn):
"""
Set commission (transaction fee) function.
Args:
fn (fn(quantity, price)): Function used to determine commission
amount.
"""
self.commission_fn = fn
for c in self._childrenv:
if isinstance(c, StrategyBase):
c.set_commissions(fn)
@cy.locals(q=cy.double, p=cy.double)
def _dflt_comm_fn(self, q, p):
return max(1, abs(q) * 0.01)
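# Illustrative sketch (not part of the original module): how a custom
# commission function plugs into set_commissions above. The fee model
# below (5.00 minimum plus half a cent per share) is a made-up assumption;
# any fn(quantity, price) -> float works.
def _example_commission_fn(q, p):
    # flat minimum plus a per-share charge; mirrors the (quantity, price)
    # signature used by _dflt_comm_fn above
    return max(5.0, abs(q) * 0.005)
# a StrategyBase instance `s` would register it (and propagate it to any
# child strategies) via:
#   s.set_commissions(_example_commission_fn)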
class SecurityBase(Node):
"""
Security Node. Used to define a security within a tree.
A Security has no children. It simply models an asset that can be bought
or sold.
Args:
* name (str): Security name
* multiplier (float): security multiplier - typically used for
derivatives.
Attributes:
* name (str): Security name
* parent (Security): Security parent
* root (Security): Root node of the tree (topmost node)
* now (datetime): Used when backtesting to store current date
* stale (bool): Flag used to determine if Security is stale and needs
updating
* prices (TimeSeries): Security prices.
* price (float): last price
* value (float): last value - basically position * price * multiplier
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Security + strategy's children
* position (float): Current position (quantity).
"""
_last_pos = cy.declare(cy.double)
_position = cy.declare(cy.double)
multiplier = cy.declare(cy.double)
_prices_set = cy.declare(cy.bint)
_needupdate = cy.declare(cy.bint)
@cy.locals(multiplier=cy.double)
def __init__(self, name, multiplier=1):
Node.__init__(self, name, parent=None, children=None)
self._value = 0
self._price = 0
self._weight = 0
self._position = 0
self.multiplier = multiplier
# opt
self._last_pos = 0
self._issec = True
self._needupdate = True
@property
def price(self):
"""
Current price.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._price
@property
def prices(self):
"""
TimeSeries of prices.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._prices.ix[:self.now]
@property
def values(self):
"""
TimeSeries of values.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
if self.root.stale:
self.root.update(self.root.now, None)
return self._values.ix[:self.now]
@property
def position(self):
"""
Current position
"""
# no stale check needed
return self._position
@property
def positions(self):
"""
TimeSeries of positions.
"""
# if accessing and stale - update first
if self._needupdate:
self.update(self.root.now)
if self.root.stale:
self.root.update(self.root.now, None)
return self._positions.ix[:self.now]
def setup(self, universe):
"""
Setup Security with universe. Speeds up future runs.
Args:
* universe (DataFrame): DataFrame of prices with security's name as
one of the columns.
"""
# if we already have all the prices, we will store them to speed up
# future updates
try:
prices = universe[self.name]
except KeyError:
prices = None
# setup internal data
if prices is not None:
self._prices = prices
self.data = pd.DataFrame(index=universe.index,
columns=['value', 'position'],
data=0.0)
self._prices_set = True
else:
self.data = pd.DataFrame(index=universe.index,
columns=['price', 'value', 'position'])
self._prices = self.data['price']
self._prices_set = False
self._values = self.data['value']
self._positions = self.data['position']
@cy.locals(prc=cy.double)
def update(self, date, data=None, inow=None):
"""
Update security with a given date and optionally, some data.
This will update price, value, weight, etc.
"""
# filter for internal calls when position has not changed - nothing to
# do. Internal calls (stale root calls) have None data. Also want to
# make sure date has not changed, because then we do indeed want to
# update.
if date == self.now and self._last_pos == self._position:
return
if inow is None:
if date == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# date change - update price
if date != self.now:
# update now
self.now = date
if self._prices_set:
self._price = self._prices.values[inow]
# traditional data update
elif data is not None:
prc = data[self.name]
self._price = prc
self._prices.values[inow] = prc
self._positions.values[inow] = self._position
self._last_pos = self._position
self._value = self._position * self._price * self.multiplier
self._values.values[inow] = self._value
if self._weight == 0 and self._position == 0:
self._needupdate = False
@cy.locals(amount=cy.double, update=cy.bint, q=cy.double, outlay=cy.double)
def allocate(self, amount, update=True):
"""
This allocates capital to the Security. This is the method used to
buy/sell the security.
The number of shares is determined based on the current price, a
commission is calculated based on the parent's commission fn, and
any remaining capital is passed back up to the parent as an
adjustment.
Args:
* amount (float): Amount of adjustment.
* update (bool): Force update?
"""
# will need to update if this has been idle for a while...
# update if needupdate or if now is stale
# fetch parent's now since our now is stale
if self._needupdate or self.now != self.parent.now:
self.update(self.parent.now)
# ignore 0 alloc
# Note that if the price of security has dropped to zero, then it should
# never be selected by SelectAll, SelectN etc. I.e. we should not open
# the position at zero price. At the same time, we are able to close
# it at zero price, because at that point amount=0.
# Note also that we don't erase the position in an asset whose price
# has dropped to zero (though the weight will indeed be = 0)
if amount == 0:
return
if self.parent is self or self.parent is None:
raise Exception(
'Cannot allocate capital to a parentless security')
if self._price == 0 or np.isnan(self._price):
raise Exception(
'Cannot allocate capital to '
'%s because price is 0 or nan as of %s'
% (self.name, self.parent.now))
# buy/sell
# determine quantity - must also factor in commission
# closing out?
if amount == -self._value:
q = -self._position
else:
if (self._position > 0) or ((self._position == 0) and (amount > 0)):
# if we're going long or changing long position
q = math.floor(amount / (self._price * self.multiplier))
else:
# if we're going short or changing short position
q = math.ceil(amount / (self._price * self.multiplier))
# if q is 0 nothing to do
if q == 0 or np.isnan(q):
return
# this security will need an update, even if pos is 0 (for example if
# we close the positions, value and pos is 0, but still need to do that
# last update)
self._needupdate = True
# adjust position & value
self._position += q
# calculate proper adjustment for parent
# parent passed down amount so we want to pass
# -outlay back up to parent to adjust for capital
# used
outlay, fee = self.outlay(q)
# call parent
self.parent.adjust(-outlay, update=update, flow=False, fee=fee)
@cy.locals(q=cy.double, p=cy.double)
def commission(self, q, p):
"""
Calculates the commission (transaction fee) based on quantity and price.
Uses the parent's commission_fn.
Args:
* q (float): quantity
* p (float): price
"""
return self.parent.commission_fn(q, p)
@cy.locals(q=cy.double)
def outlay(self, q):
"""
Determines the complete cash outlay (including commission) necessary
given a quantity q.
The second return value is the commission itself.
Args:
* q (float): quantity
"""
fee = self.commission(q, self._price * self.multiplier)
full_outlay = q * self._price * self.multiplier + fee
return full_outlay, fee
def run(self):
"""
Does nothing - securities have nothing to do on run.
"""
pass
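# Illustrative sketch (not part of the original module): the share
# quantity and outlay math performed by SecurityBase.allocate / outlay
# above, for a hypothetical long allocation. All values are made up.
def _example_outlay_math():
    import math
    amount, price, multiplier = 10000.0, 99.5, 1.0
    # going long (position >= 0, amount > 0), so quantity is floored
    q = math.floor(amount / (price * multiplier))  # -> 100 shares
    fee = max(1, abs(q) * 0.01)                    # default commission fn
    full_outlay = q * price * multiplier + fee     # -outlay is passed to parent
    return q, fee, full_outlay                     # -> (100, 1.0, 9951.0)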
class Algo(object):
"""
Algos are used to modularize strategy logic so that it becomes
composable, more testable and less error prone. Basically, an
Algo should follow the unix philosophy - do one thing well.
In practice, algos are simply functions that receive one argument, the
Strategy (referred to as target), and are expected to return a bool.
When some state preservation is necessary between calls, the Algo
object can be used (this object). The __call__ method should be
implemented and logic defined therein to mimic a function call. A
simple function may also be used if no state preservation is necessary.
Args:
* name (str): Algo name
"""
def __init__(self, name=None):
self._name = name
@property
def name(self):
"""
Algo name.
"""
if self._name is None:
self._name = self.__class__.__name__
return self._name
def __call__(self, target):
raise NotImplementedError("%s not implemented!" % self.name)
class AlgoStack(Algo):
"""
An AlgoStack derives from Algo and runs multiple Algos until a
failure is encountered.
The purpose of an AlgoStack is to group a logical set of Algos together. Each
Algo in the stack is run. Execution stops if one Algo returns False.
Args:
* algos (list): List of algos.
"""
def __init__(self, *algos):
super(AlgoStack, self).__init__()
self.algos = algos
self.check_run_always = any(hasattr(x, 'run_always')
for x in self.algos)
def __call__(self, target):
# normal running mode
if not self.check_run_always:
for algo in self.algos:
if not algo(target):
return False
return True
# run mode when at least one algo has a run_always attribute
else:
# store result in res
# allows continuation to check for and run
# algos that have run_always set to True
res = True
for algo in self.algos:
if res:
res = algo(target)
elif hasattr(algo, 'run_always'):
if algo.run_always:
algo(target)
return res
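# Illustrative sketch (not part of the original module): algos can be
# plain functions returning a bool, so an AlgoStack short-circuits much
# like `and`. The algos below are trivial stand-ins.
def _example_always_true(target):
    return True
def _example_gate(target):
    # returning False here would stop any algos stacked after it
    return target is not None
_example_stack = AlgoStack(_example_always_true, _example_gate)
# _example_stack(some_strategy) runs each algo in order, stopping on the
# first False (unless a later algo sets run_always = True, per above).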
class Strategy(StrategyBase):
"""
Strategy expands on the StrategyBase and incorporates Algos.
Basically, a Strategy is built by passing in a set of algos. These algos
will be placed in an Algo stack and the run function will call the stack.
Furthermore, two attributes are created on each instance to pass data
between algos: perm for permanent data, temp for temporary data.
Args:
* name (str): Strategy name
* algos (list): List of Algos to be passed into an AlgoStack
* children (dict, list): Children - useful when you want to create
strategies of strategies
Attributes:
* stack (AlgoStack): The stack
* temp (dict): A dict containing temporary data - cleared on each call
to run. This can be used to pass info to other algos.
* perm (dict): Permanent data used to pass info from one algo to
another. Not cleared on each pass.
"""
def __init__(self, name, algos=[], children=None):
super(Strategy, self).__init__(name, children=children)
self.stack = AlgoStack(*algos)
self.temp = {}
self.perm = {}
def run(self):
# clear out temp data
self.temp = {}
# run algo stack
self.stack(self)
# run children
for c in self.children.values():
c.run()
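# Illustrative sketch (not part of the original module): a Strategy is
# assembled from algos; temp is cleared on every run(), perm persists.
def _example_mark_temp(target):
    target.temp['note'] = 'set fresh on every run'
    return True
def _example_count_runs(target):
    target.perm['runs'] = target.perm.get('runs', 0) + 1
    return True
# construction is left commented since it needs a backtest universe:
#   s = Strategy('demo', [_example_mark_temp, _example_count_runs])
#   after each s.run(), s.temp holds 'note' and s.perm['runs'] increments.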
| apache-2.0 |
zfrenchee/pandas | pandas/core/api.py | 1 | 3146 |
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
UInt64Index, RangeIndex, Float64Index,
MultiIndex, IntervalIndex,
TimedeltaIndex, DatetimeIndex,
PeriodIndex, NaT)
from pandas.core.indexes.period import Period, period_range, pnow
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
# TODO: Remove import when statsmodels updates #18264
from pandas.core.reshape.reshape import get_dummies
from pandas.core.indexing import IndexSlice
from pandas.core.tools.numeric import to_numeric
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
# see gh-14094.
from pandas.util._depr_module import _DeprecatedModule
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
# deprecation, xref #13790
def match(*args, **kwargs):
import warnings
warnings.warn("pd.match() is deprecated and will be removed "
"in a future version",
FutureWarning, stacklevel=2)
from pandas.core.algorithms import match
return match(*args, **kwargs)
def groupby(*args, **kwargs):
import warnings
warnings.warn("pd.groupby() is deprecated and will be removed; "
"Please use the Series.groupby() or "
"DataFrame.groupby() methods",
FutureWarning, stacklevel=2)
return args[0].groupby(*args[1:], **kwargs)
# Deprecation: xref gh-16747
class TimeGrouper(object):
def __new__(cls, *args, **kwargs):
from pandas.core.resample import TimeGrouper
import warnings
warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
"Please use pd.Grouper(freq=...)",
FutureWarning, stacklevel=2)
return TimeGrouper(*args, **kwargs)
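# Illustrative sketch (added commentary, not part of this module): the
# supported replacements for the deprecated entry points above.
#
#   import pandas as pd
#   df = pd.DataFrame({'x': range(4)},
#                     index=pd.date_range('2017', periods=4, freq='D'))
#   df.groupby(df.index.month)          # instead of pd.groupby(df, ...)
#   df.groupby(pd.Grouper(freq='2D'))   # instead of pd.TimeGrouper('2D')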
| bsd-3-clause |
sodafree/backend | build/ipython/docs/examples/parallel/dagdeps.py | 6 | 3566 | """Example for generating an arbitrary DAG as a dependency map.
This demo uses networkx to generate the graph.
Authors
-------
* MinRK
"""
import networkx as nx
from random import randint, random
from IPython import parallel
def randomwait():
import time
from random import random
time.sleep(random())
return time.time()
def random_dag(nodes, edges):
"""Generate a random Directed Acyclic Graph (DAG) with a given number of nodes and edges."""
G = nx.DiGraph()
for i in range(nodes):
G.add_node(i)
while edges > 0:
a = randint(0, nodes - 1)
b = a
while b == a:
b = randint(0, nodes - 1)
G.add_edge(a,b)
if nx.is_directed_acyclic_graph(G):
edges -= 1
else:
# we closed a loop!
G.remove_edge(a,b)
return G
def add_children(G, parent, level, n=2):
"""Add children recursively to a binary tree."""
if level == 0:
return
for i in range(n):
child = parent+str(i)
G.add_node(child)
G.add_edge(parent,child)
add_children(G, child, level-1, n)
def make_bintree(levels):
"""Make a symmetrical binary tree with @levels"""
G = nx.DiGraph()
root = '0'
G.add_node(root)
add_children(G, root, levels, 2)
return G
def submit_jobs(view, G, jobs):
"""Submit jobs via client where G describes the time dependencies."""
results = {}
for node in nx.topological_sort(G):
with view.temp_flags(after=[ results[n] for n in G.predecessors(node) ]):
results[node] = view.apply(jobs[node])
return results
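# Note (added commentary, not part of the original example): the
# dependency mechanism used by submit_jobs above. Wrapping apply() in
# temp_flags(after=[...]) makes the scheduler hold the task until the
# listed AsyncResults have completed, e.g.:
#
#   with view.temp_flags(after=[ar_parent1, ar_parent2]):
#       ar_child = view.apply(some_job)   # runs only after both parents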
def validate_tree(G, results):
"""Validate that jobs executed after their dependencies."""
for node in G:
started = results[node].metadata.started
for parent in G.predecessors(node):
finished = results[parent].metadata.completed
assert started > finished, "%s should have happened after %s"%(node, parent)
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, nodelist=list(colors.keys()), node_color=list(colors.values()),
cmap=gist_rainbow, with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results
if __name__ == '__main__':
from matplotlib import pyplot as plt
# main(5,10)
main(32,96)
plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
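# Side note (added commentary, not part of the original example): with
# newer scikit-learn versions decision_function expects a 2D array, so
# the per-point loop above can be replaced by one vectorized call:
#
#   grid = np.c_[X1.ravel(), X2.ravel()]
#   Z = clf.decision_function(grid).reshape(X1.shape)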
| bsd-3-clause |
preprocessed-connectomes-project/quality-assessment-protocol | scripts/qap_check_output_csv.py | 1 | 1302 | #!/usr/bin/env python
def main():
import os
import argparse
from qap.script_utils import check_csv_missing_subs, csv_to_pandas_df, \
write_inputs_dict_to_yaml_file, read_yml_file
from qap.qap_utils import raise_smart_exception
parser = argparse.ArgumentParser()
parser.add_argument("output_csv", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
parser.add_argument("data_config", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
parser.add_argument("data_type", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
args = parser.parse_args()
csv_df = csv_to_pandas_df(args.output_csv)
data_dict = read_yml_file(args.data_config)
new_dict = check_csv_missing_subs(csv_df, data_dict, args.data_type)
if new_dict:
out_file = os.path.join(os.getcwd(),
"missing_%s_data.yml" % args.data_type)
write_inputs_dict_to_yaml_file(new_dict, out_file)
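# Illustrative command-line usage (added commentary, not part of the
# original script); the file names below are placeholders:
#
#   qap_check_output_csv.py qap_output.csv data_config.yml anatomical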
if __name__ == "__main__":
main()
| bsd-3-clause |
mkocka/galaxytea | modeling/domcek/plots.py | 1 | 4294 | import matplotlib.pyplot as plt
from numpy import *
###List of variables
# r_in [10**10 cm] inner radius
# r_out [10**10 cm] outer radius
# step [10**10 cm] step of plot
# alfa [] parameter of accretion
# M_16 [10**16 g.s**(-1)] accretion flow
# m_1 [solar mass] mass of compact object
# R_hv [10**10 cm] radius of compact object
# R_10 [10**10 cm] distance from compact object
# f numerical factor
###List of computed parameters
# Surface density [g.cm**(-2)] (sigma)
# Height [cm] (H)
# Density [g.cm**(-3)] (rho)
# Central disc temperature [K] (T_c)
# Opacity [] (tau)
# viscosity [cm**2.s**(-1)] (nu)
# radial velocity towards center [cm.s**(-1)] (v_r)
###function solutions parameters
# parameter 1 r_in
# parameter 2 r_out
# parameter 3 step
# parameter 4 alfa
# parameter 5 M_16
# parameter 6 m_1
# parameter 7 R_hv
def solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv):
#defining lists
list_function = arange(r_in,r_out,step)
R_10_l,surface_density_l,height_l,density_l,Fx = ([] for i in range(5))
temperature_l,opacity_l,viscosity_l,radial_velocity_l = ([] for i in range(4))
#computation and appending to lists
for R_10 in list_function:
f=(1-((R_hv)/(R_10))**(1.0/2))**(1.0/4)
surface_density = 5.2*alfa**(-4.0/5)*M_16**(7.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(14.0/5)
height = 1.7*10**8*alfa**(-1.0/10)*M_16**(3.0/20)*m_1**(-3.0/8)*R_10**(9.0/8)*f**(3.0/5)
density = 3.1*10**(-8)*alfa**(-7.0/10)*M_16**(11.0/20)*m_1**(5.0/8)*R_10**(-15.0/8)*f**(11.0/5)
temperature = 1.4*10**4*alfa**(-1.0/5)*M_16**(3.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(6.0/5)
opacity = 190*alfa**(-4.0/5)*M_16**(1.0/5)*f**(4.0/5)
viscosity = 1.8*10**14*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(3.0/4)*f**(6.0/5)
radial_velocity = 2.7*10**4*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(-1.0/4)*f**(-14.0/5)
R_10_l.append(R_10)
surface_density_l.append(surface_density)
height_l.append(height)
density_l.append(density)
temperature_l.append(temperature)
opacity_l.append(opacity)
viscosity_l.append(viscosity)
radial_velocity_l.append(radial_velocity)
Fx.append(f)
#transformation of R_10 to kilometers
R_km = [ x / 10**(-4) for x in R_10_l]
return R_km, surface_density_l, height_l, density_l,temperature_l,opacity_l,viscosity_l,radial_velocity_l,Fx
#for definitions of parameters look up
r_in =1.0001*10**(-4)
r_out =10**(-2)
step = 10**(-6)
alfa = 0.5
M_16 = 63
m_1 = 1.5
R_hv = 1.0*10**(-4)
lists=solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv)
print 30*"-"
print "Used parameter values"
print 30*"-"
print "innder radius:", 10*".",r_in, 10*".", "[10$^{10}$ cm]"
print "outer radius:", 10*".", r_out, 10*".", "[10$^{10}$ cm]"
print "step of plot:", 10*".", step, 10*".", "[10$^{10}$ cm]"
print "parameter of accretion alfa:", 10*".", alfa
print "accretion flow:", 10*".", M_16, 10*".", "[10$^6$ g.s${-1)}$]"
print "mass of compact object:", 10*".", m_1, 10*".", "[solar mass]"
print "radius of compact object:", 10*".", R_hv, 10*".", "[10$^{10}$ cm]"
plt.plot(lists[0], lists[1])
plt.title('surface density')
plt.xlabel('radius [km]')
plt.ylabel('surface density [g.cm$^{-2}$] ')
plt.grid()
plt.savefig("surface density")
plt.gcf().clear()
plt.plot(lists[0], lists[2])
plt.title('height')
plt.xlabel('radius [km]')
plt.ylabel('height [cm] ')
plt.grid()
plt.savefig("height")
plt.gcf().clear()
plt.plot(lists[0], lists[3])
plt.title('density')
plt.xlabel('radius [km]')
plt.ylabel('density [g.cm$^{-3}$] ')
plt.grid()
plt.savefig("density")
plt.gcf().clear()
plt.plot(lists[0], lists[4])
plt.title('temperature')
plt.xlabel('radius [km]')
plt.ylabel('temperature [K] ')
plt.grid()
plt.savefig("temperature")
plt.gcf().clear()
plt.plot(lists[0], lists[5])
plt.title('opacity')
plt.xlabel('radius [km]')
plt.ylabel('opacity ')
plt.grid()
plt.savefig("opacity")
plt.gcf().clear()
plt.plot(lists[0], lists[6])
plt.title('viscosity')
plt.xlabel('radius [km]')
plt.ylabel('viscosity [cm$^{2}$.s$^{-1}$] ')
plt.grid()
plt.savefig("viscosity")
plt.gcf().clear()
plt.plot(lists[0], lists[7])
plt.title('radial velocity')
plt.xlabel('radius [km]')
plt.ylabel('radial velocity [cm.s$^{-1}$] ')
plt.grid()
plt.savefig("radial velocity")
plt.gcf().clear()
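#the repeated plot blocks above could be collapsed into one loop; an
#illustrative sketch (not part of the original script), with the panel
#list trimmed for brevity:
#
#  panels = [(1, 'surface density', 'surface density [g.cm$^{-2}$]'),
#            (2, 'height', 'height [cm]'),
#            (3, 'density', 'density [g.cm$^{-3}$]')]
#  for idx, title, ylab in panels:
#      plt.plot(lists[0], lists[idx])
#      plt.title(title); plt.xlabel('radius [km]'); plt.ylabel(ylab)
#      plt.grid(); plt.savefig(title); plt.gcf().clear()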
| mit |
skggm/skggm | examples/trace_plot_example.py | 1 | 3138 | """
Visualize Regularization Path
=============================
Plot the edge level coefficients (inverse covariance entries)
as a function of the regularization parameter.
"""
import sys
import numpy as np
from sklearn.datasets import make_sparse_spd_matrix
sys.path.append("..")
from inverse_covariance import QuicGraphicalLasso
from inverse_covariance.plot_util import trace_plot
from inverse_covariance.profiling import LatticeGraph
def make_data(n_samples, n_features):
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng
)
cov = np.linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
return X, cov, prec
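# Note (added commentary, not in the original example): the d-rescaling
# above converts the covariance to a correlation matrix (unit diagonal)
# and rescales the precision to match: with D = diag(d),
# corr = D^{-1} cov D^{-1}, whose inverse is D prec D.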
def make_data_banded(n_samples, n_features):
alpha = 0.1
cov, prec, adj = LatticeGraph(
n_blocks=2, random_sign=True, chain_blocks=True, seed=1
).create(n_features, alpha)
prng = np.random.RandomState(2)
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
return X, cov, prec
def show_quic_coefficient_trace(X):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=20)
def show_quic_coefficient_trace_truth(X, truth):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=6, ground_truth=truth)
if __name__ == "__main__":
# example 1
n_samples = 10
n_features = 5
X, cov, prec = make_data(n_samples, n_features)
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=10")
print(" n_features=5")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=100")
print(" n_features=5")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
# example 2
n_samples = 110
n_features = 100
X, cov, prec = make_data_banded(n_samples, n_features)
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
| mit |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py | 17 | 69089 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
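# Illustrative sketch (added commentary, not part of the scipy source):
# the three coordinated quantities _PSD precomputes, for a toy SPD matrix.
#
#   M = np.array([[2.0, 0.0], [0.0, 0.5]])
#   psd = _PSD(M)
#   psd.rank          # -> 2
#   psd.log_pdet      # -> log(2.0) + log(0.5) = 0.0
#   psd.pinv          # -> the inverse of M here, since M has full rank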
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.empty(x.shape + (2,))
>>> pos[:, :, 0] = x; pos[:, :, 1] = y
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, " +
"but a.shape = %s." % str(alpha.shape))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have one entry less then the" +
" parameter vector 'a', but alpha.shape = " +
"%s and " % alpha.shape +
"x.shape = %s." % x.shape)
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater or equal zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal " +
"simplex. but sum(x)=%f." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has
support only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : scalar
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
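# Illustrative usage sketch (added commentary, not part of the scipy
# source); the class docstring above has no Examples section:
#
#   alpha = np.array([1.0, 2.0, 3.0])
#   dirichlet.mean(alpha)                    # -> alpha / alpha.sum()
#   dirichlet.pdf([0.2, 0.3, 0.5], alpha)    # density on the simplex
#   dirichlet.rvs(alpha, size=4, random_state=0)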
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
            raise ValueError('Size must be an integer or tuple of integers;'
                             ' thus must have dimension <= 1.'
                             ' Got size.ndim = %d' % size.ndim)
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants.
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
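        # (This mirrors the density in the class docstring:
        #  log f(S) = ((df-dim-1)/2)*log|S| - tr(scale^{-1} S)/2
        #             - (df*dim/2)*log(2) - (df/2)*log|scale|
        #             - log(Gamma_p(df/2)).)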
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mode : float or None
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
        # This appears to conflict with the instructions in [2]_, which
        # suggest that it should be D' B D where D is the lower
        # triangular factorization of the scale matrix. However, it is
        # meant to refer to the Bartlett (1933) representation of a
        # Wishart random variate as L A A' L', where L is lower triangular,
        # so it appears that understanding D' to be upper triangular
        # is either a typo in, or a misreading of, [2]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
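        Examples
        --------
        A shape-only illustration (sampled values depend on the random
        state):
        >>> out = wishart.rvs(df=4, scale=np.eye(2), size=3, random_state=0)
        >>> out.shape
        (3, 2, 2)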
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute the Cholesky decomposition of `scale` and log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
        This computation of ``logdet`` is equivalent to
        ``np.linalg.slogdet(scale)``, but is roughly twice as fast.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
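    Examples
    --------
    A minimal sketch on a batch of two diagonal SPD matrices (the input is
    overwritten in place, so a copy is passed here):
    >>> a = np.array([[[2., 0.], [0., 4.]],
    ...               [[1., 0.], [0., 1.]]])
    >>> inv = _cho_inv_batch(a.copy())
    >>> np.allclose(inv[0], np.diag([0.5, 0.25]))
    True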
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
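    As a further check (an illustrative sketch, not an exhaustive test),
    the mean ``scale / (df - dim - 1)`` can be verified directly:
    >>> m = invwishart.mean(df=5, scale=np.eye(3))
    >>> np.allclose(m, np.eye(3) / (5 - 3 - 1))
    True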
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
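        # (Mirrors the density in the class docstring:
        #  log f(S) = (df/2)*log|scale| - tr(scale S^{-1})/2
        #             - (df*dim/2)*log(2) - ((df+dim+1)/2)*log|S|
        #             - log(Gamma_p(df/2)).)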
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mode : float
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        var : float or None
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
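        Examples
        --------
        A shape-only sketch (sampled values depend on the random state):
        >>> out = invwishart.rvs(df=4, scale=np.eye(2), size=3,
        ...                      random_state=0)
        >>> out.shape
        (3, 2, 2)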
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/stats/tests/test_moments.py | 3 | 89255 |
import nose
import sys
import functools
import warnings
from datetime import datetime
from numpy.random import randn
from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull, concat
from pandas.util.testing import (
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal
)
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3, StringIO
N, K = 100, 10
class Base(tm.TestCase):
_multiprocess_can_split_ = True
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestMoments(Base):
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
    def test_centered_axis_validation(self):
        # ok
        mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,
                          Series(np.ones(10)), 3, center=True, axis=1)
        # ok ok
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=0)
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=1)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,
                          DataFrame(np.ones((10, 10))), 3, center=True, axis=2)
def test_rolling_sum(self):
self._check_moment_func(mom.rolling_sum, np.sum)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(mom.rolling_count, counter,
has_min_periods=False,
preserve_nan=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean)
def test_cmov_mean(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_mean(vals, 5, center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_mean(Series(vals), 5, center=True)
assert_series_equal(xp, rs)
def test_cmov_window(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
# GH 8238
tm._skip_if_no_scipy()
# all nan
vals = np.empty(10, dtype=float)
vals.fill(np.nan)
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertTrue(np.isnan(rs).all())
# empty
vals = np.array([])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertEqual(len(rs), 0)
# shorter than window
vals = np.random.randn(5)
rs = mom.rolling_window(vals, 10, 'boxcar')
self.assertTrue(np.isnan(rs).all())
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
# Gh 8238
tm._skip_if_no_scipy()
vals = np.array([[ 12.18, 3.64],
[ 10.18, 9.16],
[ 13.24, 14.61],
[ 4.51, 8.11],
[ 6.15, 11.44],
[ 9.14, 6.21],
[ 11.31, 10.67],
[ 2.94, 6.51],
[ 9.42, 8.39],
[ 12.44, 7.34 ]])
xp = np.array([[ np.nan, np.nan],
[ np.nan, np.nan],
[ 9.252, 9.392],
[ 8.644, 9.906],
[ 8.87 , 10.208],
[ 6.81 , 8.588],
[ 7.792, 8.644],
[ 9.05 , 7.824],
[ np.nan, np.nan],
[ np.nan, np.nan]])
# DataFrame
rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = mom.rolling_mean(vals, 5, min_periods=4, center=True)
rs = mom.rolling_window(vals, 5, 'boxcar', min_periods=4, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
14.03687, 13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
13.33889, 13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
14.17267, 14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
14.03559, 15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
14.20036, 14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan]}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt in win_types:
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_missing_data(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, np.nan, 10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
9.17869, 12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
9.1764, 12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
10.34667, 12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098,
13.65509]
}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
assert_series_equal(xp, rs)
def test_cmov_window_special(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
13.89053, 13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
11.73161, 13.08516, 12.95111, 12.74577,
np.nan, np.nan],
'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
12.88331, 12.96079, 12.77008, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
12.75129, 12.90702, 12.83757, np.nan, np.nan]
}
for wt, k in zip(win_types, kwds):
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_cmov_window_special_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt, k in zip(win_types, kwds):
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(mom.rolling_median, np.median)
def test_rolling_min(self):
self._check_moment_func(mom.rolling_min, np.min)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_min(a, window=100, min_periods=1)
assert_almost_equal(b, np.ones(len(a)))
self.assertRaises(ValueError, mom.rolling_min, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_max(self):
self._check_moment_func(mom.rolling_max, np.max)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_max(a, window=100, min_periods=1)
assert_almost_equal(a, b)
self.assertRaises(ValueError, mom.rolling_max, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_quantile(self):
qs = [.1, .5, .9]
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
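            # Linear index into the sorted sample; int() truncates, i.e. a
            # 'lower' interpolation convention.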
idx = per / 1. * (values.shape[0] - 1)
return values[int(idx)]
for q in qs:
def f(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_quantile(x, window, q,
min_periods=min_periods,
freq=freq,
center=center)
def alt(x):
return scoreatpercentile(x, q)
self._check_moment_func(f, alt)
def test_rolling_apply(self):
# suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
ser = Series([])
assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
def roll_mean(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_apply(x, window,
lambda x: x[np.isfinite(x)].mean(),
min_periods=min_periods,
freq=freq,
center=center)
self._check_moment_func(roll_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 2.])
assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
assert_almost_equal(result, result)
def test_rolling_std(self):
self._check_moment_func(mom.rolling_std,
lambda x: np.std(x, ddof=1))
self._check_moment_func(functools.partial(mom.rolling_std, ddof=0),
lambda x: np.std(x, ddof=0))
def test_rolling_std_1obs(self):
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1)
expected = np.array([np.nan] * 5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1, ddof=0)
expected = np.zeros(5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
3, min_periods=2)
self.assertTrue(np.isnan(result[2]))
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = np.array([0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767])
b = mom.rolling_std(a, window=3)
self.assertTrue(np.isfinite(b[2:]).all())
b = mom.ewmstd(a, span=3)
self.assertTrue(np.isfinite(b[2:]).all())
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var,
lambda x: np.var(x, ddof=1),
test_stable=True)
self._check_moment_func(functools.partial(mom.rolling_var, ddof=0),
lambda x: np.var(x, ddof=0))
def test_rolling_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_skew,
lambda x: skew(x, bias=False))
def test_rolling_kurt(self):
try:
from scipy.stats import kurtosis
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_kurt,
lambda x: kurtosis(x, bias=False))
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
arr = np.frombuffer(data, dtype='<f8')
if sys.byteorder != "little":
arr = arr.byteswap().newbyteorder()
result = mom.rolling_sum(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_mean(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_var(arr, 2)
self.assertTrue((result[1:] >= 0).all())
# #2527, ugh
arr = np.array([0.00012456, 0.0003, 0])
result = mom.rolling_mean(arr, 1)
self.assertTrue(result[-1] >= 0)
result = mom.rolling_mean(-arr, 1)
self.assertTrue(result[-1] <= 0)
def _check_moment_func(self, func, static_comp, window=50,
has_min_periods=True,
has_center=True,
has_time_rule=True,
preserve_nan=True,
fill_value=None,
test_stable=False):
self._check_ndarray(func, static_comp, window=window,
has_min_periods=has_min_periods,
preserve_nan=preserve_nan,
has_center=has_center,
fill_value=fill_value,
test_stable=test_stable)
self._check_structures(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
fill_value=fill_value,
has_center=has_center)
def _check_ndarray(self, func, static_comp, window=50,
has_min_periods=True,
preserve_nan=True,
has_center=True,
fill_value=None,
test_stable=False,
test_window=True):
result = func(self.arr, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
if has_min_periods:
result = func(arr, 50, min_periods=30)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# min_periods is working correctly
result = func(arr, 20, min_periods=15)
self.assertTrue(np.isnan(result[23]))
self.assertFalse(np.isnan(result[24]))
self.assertFalse(np.isnan(result[-6]))
self.assertTrue(np.isnan(result[-5]))
arr2 = randn(20)
result = func(arr2, 10, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, 20, min_periods=0)
result1 = func(arr, 20, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr, 50)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# GH 7925
if has_center:
if has_min_periods:
result = func(arr, 20, min_periods=15, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
else:
result = func(arr, 20, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
self.assert_numpy_array_equivalent(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:] + 1e9))
# Test window larger than array, #7297
if test_window:
if has_min_periods:
for minp in (0, len(self.arr)-1, len(self.arr)):
result = func(self.arr, len(self.arr)+1, min_periods=minp)
expected = func(self.arr, len(self.arr), min_periods=minp)
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask,
np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = func(self.arr, len(self.arr)+1)
expected = func(self.arr, len(self.arr))
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
def _check_structures(self, func, static_comp,
has_min_periods=True, has_time_rule=True,
has_center=True,
fill_value=None):
series_result = func(self.series, 50)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, 50)
self.assertEqual(type(frame_result), DataFrame)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
if has_min_periods:
series_result = func(self.series[::2], win, min_periods=minp,
freq='B')
frame_result = func(self.frame[::2], win, min_periods=minp,
freq='B')
else:
series_result = func(self.series[::2], win, freq='B')
frame_result = func(self.frame[::2], win, freq='B')
last_date = series_result.index[-1]
prev_date = last_date - 24 * datetools.bday
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
assert_almost_equal(series_result[-1], static_comp(trunc_series))
assert_almost_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp))
# GH 7925
if has_center:
if has_min_periods:
minp = 10
                series_xp = func(
                    self.series.reindex(list(self.series.index) +
                                        ['x%d' % x for x in range(12)]),
                    25, min_periods=minp).shift(-12).reindex(self.series.index)
                frame_xp = func(
                    self.frame.reindex(list(self.frame.index) +
                                       ['x%d' % x for x in range(12)]),
                    25, min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, min_periods=minp,
center=True)
frame_rs = func(self.frame, 25, min_periods=minp,
center=True)
else:
                series_xp = func(
                    self.series.reindex(list(self.series.index) +
                                        ['x%d' % x for x in range(12)]),
                    25).shift(-12).reindex(self.series.index)
                frame_xp = func(
                    self.frame.reindex(list(self.frame.index) +
                                       ['x%d' % x for x in range(12)]),
                    25).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, center=True)
frame_rs = func(self.frame, 25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
assert_series_equal(series_xp, series_rs)
assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(mom.ewma)
arr = np.zeros(1000)
arr[5] = 1
result = mom.ewma(arr, span=100, adjust=False).sum()
self.assertTrue(np.abs(result - 1) < 1e-2)
s = Series([1.0, 2.0, 4.0, 8.0])
expected = Series([1.0, 1.6, 2.736842, 4.923077])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=True),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [1.] * len(s))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [np.nan] * 2 + [1.] * 4)
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
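            # Weighted moving average: cumulative weighted sum over
            # cumulative weights, with NaN positions forward-filled.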
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
(s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = mom.ewma(s, com=com, adjust=adjust)
assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(mom.ewmvar)
def test_ewmvol(self):
self._check_ew(mom.ewmvol)
def test_ewma_span_com_args(self):
A = mom.ewma(self.arr, com=9.5)
B = mom.ewma(self.arr, span=20)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ewma_halflife_arg(self):
A = mom.ewma(self.arr, com=13.932726172912965)
B = mom.ewma(self.arr, halflife=10.0)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ew_empty_arrays(self):
arr = np.array([], dtype=np.float64)
funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]
for f in funcs:
result = f(arr, 3)
assert_almost_equal(result, arr)
def _check_ew(self, func):
self._check_ew_ndarray(func)
self._check_ew_structures(func)
def _check_ew_ndarray(self, func, preserve_nan=False):
result = func(self.arr, com=10)
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), 50, min_periods=min_periods)
if func == mom.ewma:
assert_series_equal(result, Series([1.]))
else:
# ewmstd, ewmvol, ewmvar with bias=False require at least two values
assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
self.assertEqual(result2.dtype, np.float_)
def _check_ew_structures(self, func):
series_result = func(self.series, com=10)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel()
return len(set(values[notnull(values)])) == 1
def no_nans(x):
return x.notnull().all().all()
# data is a tuple(object, is_contant, no_nans)
data = create_series() + create_dataframes()
return [ (x, is_constant(x), no_nans(x)) for x in data ]
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
min_periods,
count, mean, mock_mean, corr,
var_unbiased=None, std_unbiased=None, cov_unbiased=None,
var_biased=None, std_biased=None, cov_biased=None,
var_debiasing_factors=None):
def _non_null_values(x):
values = x.values.ravel()
return set(values[notnull(values)].tolist())
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected)
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = x.max().max()
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
self.assertFalse((var_x < 0).any().any())
self.assertFalse((std_x < 0).any().any())
if cov:
cov_x_x = cov(x, x)
self.assertFalse((cov_x_x < 0).any().any())
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
self.assertFalse((var_x > 0).any().any())
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isnull().equals(y.isnull()):
# can only easily test two Series with similar structure
continue
# check that cor(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) * std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@slow
def test_ewm_consistency(self):
def _weights(s, com, adjust, ignore_na):
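            # Reconstruct the per-observation weights implied by the ewm
            # parameters (com/adjust/ignore_na), so the mock mean reduces to
            # a plain weighted average.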
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([ _weights(s.iloc[:, i],
com=com,
adjust=adjust,
ignore_na=ignore_na) for i, _ in enumerate(s.columns) ],
axis=1)
                w.index = s.index
                w.columns = s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
def _variance_debiasing_factors(s, com, adjust, ignore_na):
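            # Debiasing factor (sum w)^2 / ((sum w)^2 - sum w^2), the
            # reliability-weights correction used when bias=False.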
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
def _ewma(s, com, min_periods, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
return result
com = 3.
for min_periods in [0, 1, 2, 3, 4]:
for adjust in [True, False]:
for ignore_na in [False, True]:
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
@slow
def test_expanding_consistency(self):
base_functions = [
(mom.expanding_count, lambda v: Series(v).count(), None),
(mom.expanding_max, lambda v: Series(v).max(), None),
(mom.expanding_min, lambda v: Series(v).min(), None),
(mom.expanding_sum, lambda v: Series(v).sum(), None),
(mom.expanding_mean, lambda v: Series(v).mean(), None),
(mom.expanding_std, lambda v: Series(v).std(), 1),
(mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.expanding_var, lambda v: Series(v).var(), 1),
#(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.expanding_median, lambda v: Series(v).median(), None),
(mom.expanding_max, np.nanmax, 1),
(mom.expanding_min, np.nanmin, 1),
(mom.expanding_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.expanding_mean, np.nanmean, 1),
(mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.expanding_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.expanding_max, np.max, None),
(mom.expanding_min, np.min, None),
(mom.expanding_sum, np.sum, None),
(mom.expanding_mean, np.mean, None),
(mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
(mom.expanding_median, np.median, None),
]
# suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
for min_periods in [0, 1, 2, 3, 4]:
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
# test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
# or (b) expanding_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (expanding_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if expanding_f is mom.expanding_count:
expanding_f_result = expanding_f(x)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
else:
if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
else:
expanding_f_result = expanding_f(x, min_periods=min_periods)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
assert_equal(expanding_f_result, expanding_apply_f_result)
if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
# test pairwise=True
expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
base_functions = [
(mom.rolling_count, lambda v: Series(v).count(), None),
(mom.rolling_max, lambda v: Series(v).max(), None),
(mom.rolling_min, lambda v: Series(v).min(), None),
(mom.rolling_sum, lambda v: Series(v).sum(), None),
(mom.rolling_mean, lambda v: Series(v).mean(), None),
(mom.rolling_std, lambda v: Series(v).std(), 1),
(mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.rolling_var, lambda v: Series(v).var(), 1),
#(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.rolling_median, lambda v: Series(v).median(), None),
(mom.rolling_max, np.nanmax, 1),
(mom.rolling_min, np.nanmin, 1),
(mom.rolling_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.rolling_mean, np.nanmean, 1),
(mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.rolling_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.rolling_max, np.max, None),
(mom.rolling_min, np.min, None),
(mom.rolling_sum, np.sum, None),
(mom.rolling_mean, np.mean, None),
(mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
(mom.rolling_median, np.median, None),
]
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
for center in [False, True]:
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: mom.rolling_count(x, window=window, center=center),
mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
mom.rolling_count(x, window=window, center=center)),
corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
# test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
# or (b) rolling_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (rolling_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if rolling_f is mom.rolling_count:
rolling_f_result = rolling_f(x, window=window, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=0, center=center)
else:
if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
else:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=min_periods, center=center)
assert_equal(rolling_f_result, rolling_apply_f_result)
if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
# test pairwise=True
rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
center=center, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
window=window, min_periods=min_periods, center=center)
assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_cov(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment(mom.rolling_cov, 10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_corr(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = mom.rolling_corr(a, b, len(a), min_periods=1)
assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment(mom.rolling_corr, 10, min_periods=5)
def _check_pairwise_moment(self, func, *args, **kwargs):
panel = func(self.frame, *args, **kwargs)
actual = panel.ix[:, 1, 5]
expected = func(self.frame[1], self.frame[5], *args, **kwargs)
tm.assert_series_equal(actual, expected)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
        self.assertRaises(TypeError, mom._flex_binary_moment, 5, 6, None)
def test_corr_sanity(self):
#GH 3155
df = DataFrame(
np.array(
[[ 0.87024726, 0.18505595],
[ 0.64355431, 0.3091617 ],
[ 0.92372966, 0.50552513],
[ 0.00203756, 0.04520709],
[ 0.84780328, 0.33394331],
[ 0.78369152, 0.63919667]])
)
        res = mom.rolling_corr(df[0], df[1], 5, center=True)
        self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
        # and some fuzzing
        for i in range(10):
            df = DataFrame(np.random.rand(30, 2))
            res = mom.rolling_corr(df[0], df[1], 5, center=True)
            try:
                self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
            except AssertionError:
                # show the offending values before re-raising, so the
                # failure is reported rather than silently swallowed
                print(res)
                raise
def test_flex_binary_frame(self):
def _check(method):
series = self.frame[1]
res = method(series, self.frame, 10)
res2 = method(self.frame, series, 10)
exp = self.frame.apply(lambda x: method(series, x, 10))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = method(self.frame, frame2, 10)
exp = DataFrame(dict((k, method(self.frame[k], frame2[k], 10))
for k in self.frame))
tm.assert_frame_equal(res3, exp)
methods = [mom.rolling_corr, mom.rolling_cov]
for meth in methods:
_check(meth)
def test_ewmcov(self):
self._check_binary_ew(mom.ewmcov)
def test_ewmcov_pairwise(self):
self._check_pairwise_moment(mom.ewmcov, span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew(mom.ewmcorr)
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment(mom.ewmcorr, span=10, min_periods=5)
def _check_binary_ew(self, func):
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
self.assertTrue(np.isnan(result.values[:14]).all())
self.assertFalse(np.isnan(result.values[14:]).any())
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply(self):
ser = Series([])
assert_series_equal(ser, mom.expanding_apply(ser, lambda x: x.mean()))
def expanding_mean(x, min_periods=1, freq=None):
return mom.expanding_apply(x,
lambda x: x.mean(),
min_periods=min_periods,
freq=freq)
self._check_expanding(expanding_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 3.])
assert_series_equal(result, expected)
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = mom.expanding_apply(df, np.mean) + 20.
assert_frame_equal(mom.expanding_apply(df, mean_w_arg, args=(20,)),
expected)
assert_frame_equal(mom.expanding_apply(df, mean_w_arg,
kwargs={'const' : 20}),
expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = mom.expanding_corr(A, B)
rolling_result = mom.rolling_corr(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = mom.expanding_count(self.series)
assert_almost_equal(result, mom.rolling_count(self.series,
len(self.series)))
def test_expanding_quantile(self):
result = mom.expanding_quantile(self.series, 0.5)
rolling_result = mom.rolling_quantile(self.series,
len(self.series),
0.5, min_periods=1)
assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = mom.expanding_cov(A, B)
rolling_result = mom.rolling_cov(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_max(self):
self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
result = mom.expanding_cov(self.frame)
rolling_result = mom.rolling_cov(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_corr_pairwise(self):
result = mom.expanding_corr(self.frame)
rolling_result = mom.rolling_corr(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_cov(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, None, 4.5])
assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_corr(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, None, 1.])
assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_cov(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_cov(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_corr(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_corr(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_functions_window_non_shrinkage(self):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1,5], [3, 2], [3,9], [-1,0]], columns=['A','B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
df_expected_panel = Panel(items=df.index, major_axis=df.columns, minor_axis=df.columns)
functions = [lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df_result = f(df)
assert_frame_equal(df_result, df_expected)
            except ImportError:
# scipy needed for rolling_window
continue
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                     # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
assert_panel_equal(df_result_panel, df_expected_panel)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2_expected = df2
df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
functions = [lambda x: mom.expanding_count(x),
lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_max(x, min_periods=5),
lambda x: mom.expanding_min(x, min_periods=5),
lambda x: mom.expanding_sum(x, min_periods=5),
lambda x: mom.expanding_mean(x, min_periods=5),
lambda x: mom.expanding_std(x, min_periods=5),
lambda x: mom.expanding_var(x, min_periods=5),
lambda x: mom.expanding_skew(x, min_periods=5),
lambda x: mom.expanding_kurt(x, min_periods=5),
lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
lambda x: mom.expanding_median(x, min_periods=5),
lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
lambda x: mom.rolling_count(x, window=10),
lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df1_result = f(df1)
assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
assert_frame_equal(df2_result, df2_expected)
            except ImportError:
# scipy needed for rolling_window
continue
functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                 # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
assert_panel_equal(df1_result_panel, df1_expected_panel)
df2_result_panel = f(df2)
assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B'])
df1a = DataFrame([[1,5], [3,9]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_cov(df1, df2, pairwise=True)[2]
result2 = mom.expanding_cov(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_cov(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_cov(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,2], [3, 2], [3,4]], columns=['A','B'])
df1a = DataFrame([[1,2], [3,4]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_corr(df1, df2, pairwise=True)[2]
result2 = mom.expanding_corr(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_corr(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_corr(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_pairwise_stats_column_names_order(self):
# GH 7738
df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]),
DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]),
DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]),
DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']),
]
df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
s = Series([1,1,3,8])
# suppress warnings about incomparable objects, as we are deliberately testing with such column labels
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
# DataFrame methods (which do not call _flex_binary_moment())
for f in [lambda x: x.cov(),
lambda x: x.corr(),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.columns)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=True
for f in [lambda x: mom.expanding_cov(x, pairwise=True),
lambda x: mom.expanding_corr(x, pairwise=True),
lambda x: mom.rolling_cov(x, window=3, pairwise=True),
lambda x: mom.rolling_corr(x, window=3, pairwise=True),
lambda x: mom.ewmcov(x, com=3, pairwise=True),
lambda x: mom.ewmcorr(x, com=3, pairwise=True),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=False
for f in [lambda x: mom.expanding_cov(x, pairwise=False),
lambda x: mom.expanding_corr(x, pairwise=False),
lambda x: mom.rolling_cov(x, window=3, pairwise=False),
lambda x: mom.rolling_corr(x, window=3, pairwise=False),
lambda x: mom.ewmcov(x, com=3, pairwise=False),
lambda x: mom.ewmcorr(x, com=3, pairwise=False),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=True
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
lambda x, y: mom.expanding_corr(x, y, pairwise=True),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
]:
results = [f(df, df2) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df2.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=False
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
lambda x, y: mom.expanding_corr(x, y, pairwise=False),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
]:
results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
for (df, result) in zip(df1s, results):
if result is not None:
expected_index = df.index.union(df2.index)
expected_columns = df.columns.union(df2.columns)
assert_index_equal(result.index, expected_index)
assert_index_equal(result.columns, expected_columns)
else:
tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
# DataFrame with a Series
for f in [lambda x, y: mom.expanding_cov(x, y),
lambda x, y: mom.expanding_corr(x, y),
lambda x, y: mom.rolling_cov(x, y, window=3),
lambda x, y: mom.rolling_corr(x, y, window=3),
lambda x, y: mom.ewmcov(x, y, com=3),
lambda x, y: mom.ewmcorr(x, y, com=3),
]:
results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_skew(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_skew(d, window=2)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
0.177994, 1.548824])
x = mom.rolling_skew(d, window=4)
assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_kurt(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_kurt(d, window=3)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
1.224307, 2.671499])
x = mom.rolling_kurt(d, window=4)
assert_series_equal(expected, x)
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
assert_almost_equal(result[10],
static_comp(self.arr[:11]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
arr = randn(50)
if has_min_periods:
result = func(arr, min_periods=30)
assert(np.isnan(result[:29]).all())
assert_almost_equal(result[-1], static_comp(arr[:50]))
# min_periods is working correctly
result = func(arr, min_periods=15)
self.assertTrue(np.isnan(result[13]))
self.assertFalse(np.isnan(result[14]))
arr2 = randn(20)
result = func(arr2, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, min_periods=0)
result1 = func(arr, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr)
assert_almost_equal(result[-1], static_comp(arr[:50]))
def _check_expanding_structures(self, func):
series_result = func(self.series)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame)
self.assertEqual(type(frame_result), DataFrame)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True,
preserve_nan=True):
self._check_expanding_ndarray(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
preserve_nan=preserve_nan)
self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_max_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='median')
assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0+10.0+20.0)/3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='mean')
assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_min(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_median_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_median(series, window=1, freq='D')
assert_series_equal(expected, x)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
jcasner/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/path.py | 69 | 20263 | """
Contains a class for managing paths (polylines).
"""
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib._path import point_in_path, get_path_extents, \
point_in_path_collection, get_path_collection_extents, \
path_in_path, path_intersects_path, convert_path_to_polygons
from matplotlib.cbook import simple_linear_interpolation
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
    provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
to get the vertex/code pairs. This is important, since many
:class:`Path` objects, as an optimization, do not store a *codes*
at all, but have a default one provided for them by
:meth:`iter_segments`.
Note also that the vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 5 # 1 vertex
NUM_VERTICES = [1, 1, 1, 2, 3, 1]
code_type = np.uint8
def __init__(self, vertices, codes=None):
"""
Create a new path with the given vertices and codes.
*vertices* is an Nx2 numpy float array, masked array or Python
sequence.
*codes* is an N-length numpy array or Python sequence of type
:attr:`matplotlib.path.Path.code_type`.
These two arrays must have the same length in the first
dimension.
If *codes* is None, *vertices* will be treated as a series of
line segments.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if codes is not None:
codes = np.asarray(codes, self.code_type)
assert codes.ndim == 1
assert len(codes) == len(vertices)
assert vertices.ndim == 2
assert vertices.shape[1] == 2
self.should_simplify = (len(vertices) >= 128 and
(codes is None or np.all(codes <= Path.LINETO)))
self.has_nonfinite = not np.isfinite(vertices).all()
self.codes = codes
self.vertices = vertices
#@staticmethod
def make_compound_path(*args):
"""
(staticmethod) Make a compound path from a list of Path
objects. Only polygons (not curves) are supported.
"""
for p in args:
assert p.codes is None
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
        vertices = vertices.reshape((total_length, 2))
codes = Path.LINETO * np.ones(total_length)
i = 0
for length in lengths:
codes[i] = Path.MOVETO
i += length
return Path(vertices, codes)
make_compound_path = staticmethod(make_compound_path)
def __repr__(self):
return "Path(%s, %s)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, simplify=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
If *simplify* is provided, it must be a tuple (*width*,
*height*) defining the size of the figure, in native units
(e.g. pixels or points). Simplification implies both removing
adjacent line segments that are very close to parallel, and
removing line segments outside of the figure. The path will
be simplified *only* if :attr:`should_simplify` is True, which
is determined in the constructor by this criteria:
- No curves
          - At least 128 vertices
"""
vertices = self.vertices
if not len(vertices):
return
codes = self.codes
len_vertices = len(vertices)
isfinite = np.isfinite
NUM_VERTICES = self.NUM_VERTICES
MOVETO = self.MOVETO
LINETO = self.LINETO
CLOSEPOLY = self.CLOSEPOLY
STOP = self.STOP
if simplify is not None and self.should_simplify:
polygons = self.to_polygons(None, *simplify)
for vertices in polygons:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
elif codes is None:
if self.has_nonfinite:
next_code = MOVETO
for v in vertices:
if np.isfinite(v).all():
yield v, next_code
next_code = LINETO
else:
next_code = MOVETO
else:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
else:
i = 0
was_nan = False
while i < len_vertices:
code = codes[i]
if code == CLOSEPOLY:
yield [], code
i += 1
elif code == STOP:
return
else:
num_vertices = NUM_VERTICES[int(code)]
curr_vertices = vertices[i:i+num_vertices].flatten()
if not isfinite(curr_vertices).all():
was_nan = True
elif was_nan:
yield curr_vertices[-2:], MOVETO
was_nan = False
else:
yield curr_vertices, code
i += num_vertices
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`:
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes)
def contains_point(self, point, transform=None):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return point_in_path(point[0], point[1], self, transform)
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from transforms import Bbox
if transform is not None:
transform = transform.frozen()
return Bbox(get_path_extents(self, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0):
"""
Convert this path to a list of polygons. Each polygon is an
Nx2 array of vertices. In other words, each polygon has no
``MOVETO`` instructions or curves. This is useful for
displaying in backends that do not support compound paths or
Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
if transform is None:
return [self.vertices]
else:
return [transform.transform(self.vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return convert_path_to_polygons(self, transform, width, height)
_unit_rectangle = None
#@classmethod
def unit_rectangle(cls):
"""
        (classmethod) Returns a :class:`Path` of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
return cls._unit_rectangle
unit_rectangle = classmethod(unit_rectangle)
_unit_regular_polygons = WeakValueDictionary()
#@classmethod
def unit_regular_polygon(cls, numVertices):
"""
        (classmethod) Returns a :class:`Path` for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
path = Path(verts)
cls._unit_regular_polygons[numVertices] = path
return path
unit_regular_polygon = classmethod(unit_regular_polygon)
_unit_regular_stars = WeakValueDictionary()
#@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
        (classmethod) Returns a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
path = Path(verts)
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
unit_regular_star = classmethod(unit_regular_star)
#@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
        (classmethod) Returns a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
unit_regular_asterisk = classmethod(unit_regular_asterisk)
_unit_circle = None
#@classmethod
def unit_circle(cls):
"""
        (classmethod) Returns a :class:`Path` of the unit circle.
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle is None:
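            # MAGIC is the cubic Bezier control-point offset for one of the
            # eight 45-degree arcs: approximately (4/3) * tan(pi / 16), which
            # is ~0.265216; the slightly smaller value used here comes from
            # the Lancaster reference cited in the docstring.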
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(26)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle = Path(vertices, codes)
return cls._unit_circle
unit_circle = classmethod(unit_circle)
#@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
        (classmethod) Returns an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
        Maisonobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
if (theta2 - theta1 > np.pi) and (eta2 - eta1 < np.pi):
eta2 += twopi
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [Path.MOVETO, Path.LINETO]
codes[-2:] = [Path.LINETO, Path.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = Path.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return Path(vertices, codes)
arc = classmethod(arc)
#@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
        (classmethod) Returns a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
wedge = classmethod(wedge)
_get_path_collection_extents = get_path_collection_extents
def get_path_collection_extents(*args):
"""
Given a sequence of :class:`Path` objects, returns the bounding
box that encapsulates all of them.
"""
from transforms import Bbox
if len(args[1]) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_get_path_collection_extents(*args))
| agpl-3.0 |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/sphinxext/ipython_directive.py | 12 | 42845 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount:
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
Pseudo-Decorators
=================
Note: Only one decorator is supported per input. If more than one decorator
is specified, then only the last one is used.
In addition to the Pseudo-Decorators/options described at the above link,
several enhancements have been made. The directive will emit a message to the
console at build-time if code-execution resulted in an exception or warning.
You can suppress these on a per-block basis by specifying the :okexcept:
or :okwarning: options:
.. code-block:: rst
.. ipython::
:okexcept:
:okwarning:
In [1]: 1/0
In [2]: # raise warning.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import atexit
import os
import re
import sys
import tempfile
import ast
import warnings
import shutil
# Third-party
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
# Our own
from traitlets.config import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue over more than one line if we have a
# '\' line-continuation char, or if stdout is echoed below the
# input (e.g. from 'print'). The input line can only be
# terminated by the end of the block or an output line, so we
# parse out the rest of the input line if it is multiline, as
# well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
# However, if the space is there we must consume it or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
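# A minimal, hypothetical sketch (not part of the original module) showing
# how block_parser tokenizes one input/output pair; the regexes mirror the
# defaults registered in setup() at the bottom of this file.
def _example_block_parser():
    rgxin = re.compile(r'In \[(\d+)\]:\s?(.*)\s*')
    rgxout = re.compile(r'Out\[(\d+)\]:\s?(.*)\s*')
    part = 'In [1]: 1 + 1\nOut[1]: 2'
    blocks = block_parser(part, rgxin, rgxout, 'In [%d]:', 'Out[%d]:')
    # Expected: [(INPUT, (None, '1 + 1', '')), (OUTPUT, '2')]
    return blocks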
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None):
self.cout = StringIO()
if exec_lines is None:
exec_lines = []
# Create config object for IPython
config = Config()
config.HistoryManager.hist_file = ':memory:'
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
atexit.register(self.cleanup)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.tmp_profile_dir = tmp_profile_dir
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
# this is assigned by the SetUp method of IPythonDirective
# to point at itself.
#
# So, you can access handy things at self.directive.state
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def cleanup(self):
shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# The "rest" is the standard output of the input. This needs to be
# added when in verbatim mode. If there is no "rest", then we don't
# add it, as the new line will be added by the processed output.
ret.append(rest)
# Fetch the processed output. (This is not the submitted output.)
self.cout.seek(0)
processed_output = self.cout.read()
if not is_suppress and not is_semicolon:
#
# In IPythonDirective.run, the elements of `ret` are eventually
# combined such that '' entries correspond to newlines. So if
# `processed_output` is equal to '', then adding it to `ret`
# ensures that there is a blank line between consecutive inputs
# that have no outputs, as in:
#
# In [1]: x = 4
#
# In [2]: x = 5
#
# When there is processed output, it has a '\n' at the tail end. So
# adding the output to `ret` will provide the necessary spacing
# between consecutive input/output blocks, as in:
#
# In [1]: x
# Out[1]: 5
#
# In [2]: x
# Out[2]: 5
#
# When there is stdout from the input, it also has a '\n' at the
# tail end, and so this ensures proper spacing as well. E.g.:
#
# In [1]: print x
# 5
#
# In [2]: x = 5
#
# When in verbatim mode, `processed_output` is empty (because
# nothing was passed to IP). Sometimes the submitted code block has
# an Out[] portion and sometimes it does not. When it does not, we
# need to ensure proper spacing, so we have to add '' to `ret`.
# However, if there is an Out[] in the submitted code, then we do
# not want to add a newline as `process_output` has stuff to add.
# The difficulty is that `process_input` doesn't know if
# `process_output` will be called---so it doesn't know if there is
# Out[] in the code block. This requires that we include a hack in
# `process_block`. See the comments there.
#
ret.append(processed_output)
elif is_semicolon:
# Make sure there is a newline after the semicolon.
ret.append('')
# context information
filename = "Unknown"
lineno = 0
if self.directive.state:
filename = self.directive.state.document.current_source
lineno = self.directive.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in processed_output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(processed_output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(('-' * 76) + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, processed_output,
is_doctest, decorator, image_file, image_directive)
def process_output(self, data, output_prompt, input_lines, output,
is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
# Recall: `data` is the submitted output, and `output` is the processed
# output from `input_lines`.
TAB = ' ' * 4
if is_doctest and output is not None:
found = output # This is the processed output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
# When in verbatim mode, this holds additional submitted output
# to be written in the final Sphinx output.
# https://github.com/ipython/ipython/issues/5776
out_data = []
is_verbatim = decorator=='@verbatim' or self.is_verbatim
if is_verbatim and data.strip():
# Note that `ret` in `process_block` has '' as its last element if
# the code block was in verbatim mode. So if there is no submitted
# output, then we will have proper spacing only if we do not add
# an additional '' to `out_data`. This is why we condition on
# `and data.strip()`.
# The submitted output has no output prompt. If we want the
# prompt and the code to appear, we need to join them now
# instead of adding them separately---as this would create an
# undesired newline. How we do this ultimately depends on the
# format of the output regex. I'll do what works for the default
# prompt for now, and we might have to adjust if it doesn't work
# in other cases. Finally, the submitted output does not have
# a trailing newline, so we must add it manually.
out_data.append("{0} {1}\n".format(output_prompt, data))
return out_data
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings; it is unedited directive content.
This runs it line by line through the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
outdir = self.state.document.settings.env.app.outdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path or '_static'
if isinstance(savefig_dir, list):
savefig_dir = os.path.join(*savefig_dir)
savefig_dir = os.path.join(outdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
# parts consists of all text within the ipython-block.
# Each part is an input/output block.
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' {0}'.format(line)
for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines) > 2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| gpl-2.0 |
pianomania/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing the
# normal case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
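# For instance, with n_population=10 and n_samples=3 there are
# comb(10, 3) = 120 distinct unordered samples, so the loop above breaks
# as soon as all 120 frozensets have been observed.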
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
cuttlefishh/emp | code/01-metadata/metadata_template_generator.py | 1 | 4911 | #!/usr/bin/env python
import click
import pandas as pd
import re
# Hard-coded variables
investigation_type = 'metagenome'
# Function: return dataframe of environmental package-specific metadata items
# A single environmental package (soil) or list can be provided (soil,water).
def show_items_of_env_pkg(df_env_pkg, list_of_env_pkg):
"""Return dataframe of environmental package-specific metadata items"""
df_items = df_env_pkg[df_env_pkg['Environmental package'].isin(list_of_env_pkg)]
return df_items
# Function: return dataframe of metadata template
def create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix):
"""Return dataframe of metadata template"""
# get headers/requirement/example of Qiita-EBI/MIMS/env_pkg columns
pkg_items = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
headers_env_pkg = pkg_items['Structured comment name'].values
require_env_pkg = pkg_items['Requirement']
example_env_pkg = pkg_items['Value syntax']
headers_all = list(df_QiitaEBI.iloc[0]) + list(df_MIMS.iloc[0]) + list(headers_env_pkg)
require_all = pd.concat([df_QiitaEBI.iloc[1], df_MIMS.iloc[1], require_env_pkg])
example_all = pd.concat([df_QiitaEBI.iloc[2], df_MIMS.iloc[2], example_env_pkg])
# populate template dataframe
df_template = pd.DataFrame(columns=headers_all, dtype=object)
df_template.loc['Requirement'] = require_all.values
df_template.loc['Format'] = example_all.values
string_of_env_pkg = re.sub(r'\W', '.', '.'.join(list_of_env_pkg))
for i in range(0, number_of_samples):
df_template.loc[i+1] = ['' for x in range(len(df_template.columns))]
df_template.loc[i+1]['sample_name'] = '%s.%s.%s' % (sample_prefix, string_of_env_pkg, i+1)
df_template.loc[i+1]['investigation_type'] = investigation_type
df_template.loc[i+1]['env_package'] = ' or '.join(list_of_env_pkg)
return df_template
@click.command()
@click.option('--qiita_ebi_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with Qiita/EBI and MIMS required fields. Example: Qiita_EBI_MIMS_v1.xlsx')
@click.option('--migs_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with MIxS standards. Example: MIGS_MIMS_v4.xls')
@click.option('--list_of_env_pkg', required=True, type=click.STRING, help="One (recommended) or more (separated by commas) environmental package. Choose from: air, built environment, host-associated, human-associated, human-skin, human-oral, human-gut, human-vaginal, microbial mat/biofilm, misc environment, plant-associated, sediment, soil, wastewater/sludge, water")
@click.option('--number_of_samples', required=True, type=click.INT, help='Number of samples (per environmental package) to create rows for in the template')
@click.option('--sample_prefix', required=True, type=click.STRING, help='Prefix string to prepend to sample numbers in row indexes. Example: Metcalf40 (EMP500 PI and study number)')
# Main function: generate metadata template and readme csv files
def generate_metadata_template(qiita_ebi_mims_path, migs_mims_path, list_of_env_pkg, number_of_samples, sample_prefix):
"""Generate metadata template and readme csv files"""
# Qiita/EBI/MIMS Excel file to DataFrames
df_QiitaEBI = pd.read_excel(qiita_ebi_mims_path, sheetname='QiitaEBI', header=None)
df_MIMS = pd.read_excel(qiita_ebi_mims_path, sheetname='MIMS', header=None)
list_of_env_pkg = list_of_env_pkg.split(",")
# MIGS/MIMS Excel file to DataFrames
df_README = pd.read_excel(migs_mims_path, sheetname='README', header=None)
df_MIGS_MIMS = pd.read_excel(migs_mims_path, sheetname='MIGS_MIMS', header=0, index_col=0)
df_env_pkg = pd.read_excel(migs_mims_path, sheetname='environmental_packages', header=0)
# generate template file
df_template = create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_template.to_csv('%s_%s_%s_samples.csv' % (sample_prefix, string_of_env_pkg, number_of_samples), index_label='index')
# generate info file
df_MIMS_select = df_MIGS_MIMS[df_MIGS_MIMS.Section.isin(['investigation', 'environment', 'migs/mims/mimarks extension'])]
df_MIMS_select.to_csv('README_MIMS_metadata.csv')
df_env_pkg_select = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
del df_env_pkg_select['Environmental package']
df_env_pkg_select.set_index('Structured comment name', inplace=True)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_env_pkg_select.to_csv('README_%s_metadata.csv' % string_of_env_pkg)
# Execute main function
if __name__ == '__main__':
generate_metadata_template()
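# Example invocation (hypothetical file names, mirroring the --help examples):
#
#   python metadata_template_generator.py \
#       --qiita_ebi_mims_path Qiita_EBI_MIMS_v1.xlsx \
#       --migs_mims_path MIGS_MIMS_v4.xls \
#       --list_of_env_pkg soil \
#       --number_of_samples 40 \
#       --sample_prefix Metcalf40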
| bsd-3-clause |
steven-murray/pydftools | setup.py | 1 | 2408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import io
import os
import re
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"scipy",
"numpy>=1.6.2",
"Click>=6.0",
"attrs>=17.0",
"cached_property",
"chainconsumer",
"matplotlib"
# TODO: put package requirements here
]
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8"),
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
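# Hypothetical sketch: find_version("pydftools", "__init__.py") scans that
# file for a line such as `__version__ = "0.1.0"` and returns "0.1.0".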
setup_requirements = [
"pytest-runner",
# TODO(steven-murray): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
"pytest",
# TODO: put package test requirements here
]
setup(
name="pydftools",
version=find_version("pydftools", "__init__.py"),
description="A pure-python port of the dftools R package.",
long_description=readme + "\n\n" + history,
author="Steven Murray",
author_email="steven.murray@curtin.edu.au",
url="https://github.com/steven-murray/pydftools",
packages=find_packages(include=["pydftools"]),
entry_points={"console_scripts": ["pydftools=pydftools.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="pydftools",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| mit |
chvogl/tardis | tardis/io/config_reader.py | 1 | 40145 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters
----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns
-------
~np.array
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
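# A minimal sketch with hypothetical values; note that add_one=True yields
# num + 1 grid points, i.e. the boundaries enclosing `num` shells:
def _example_parse_quantity_linspace():
    velocity_grid = parse_quantity_linspace({'start': '1000 km/s',
                                             'stop': '2000 km/s',
                                             'num': 2})
    # -> <Quantity [1000., 1500., 2000.] km / s>
    return velocity_grid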
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
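# For example, parse_spectral_bin('500 angstrom', '10000 angstrom') returns
# (500 Angstrom, 10000 Angstrom); the boundaries are ordered regardless of
# the order in which they are supplied.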
def calculate_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
v_0 : ~astropy.Quantity
reference velocity
rho0 : ~astropy.Quantity
reference density
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
This function computes a discrete power-law density profile.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
velocity_0 : ~astropy.Quantity
reference velocity
rho_0 : ~astropy.Quantity
reference density
exponent : ~float
exponent used in the powerlaw
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho_0 * np.power((velocities / velocity_0), exponent)
return densities
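# A minimal sketch (hypothetical values) exercising the two analytic density
# profiles defined above:
def _example_density_profiles():
    velocities = u.Quantity([9000., 11000., 13000.], 'km/s')
    v_0 = u.Quantity(10000., 'km/s')
    rho_0 = u.Quantity(1e-13, 'g/cm^3')
    # exponent -7 reproduces the Branch 85 W7 fit used elsewhere in this file
    power_law = calculate_power_law_density(velocities, v_0, rho_0, -7)
    exponential = calculate_exponential_density(velocities, v_0, rho_0)
    return power_law, exponential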
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
#converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
index=np.arange(1, 31))
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
-(time_explosion * inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
-(time_explosion * inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
-(time_explosion * inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:, i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
(model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
#converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
(density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return density_dict['value'].to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities,
density_dict['w7_v_0'],
density_dict['w7_rho_0'], -7)
densities = calculate_density_after_time(densities,
density_dict['w7_time_0'],
time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
exponent = density_dict.pop('exponent')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities, v_0, rho_0, exponent)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_exponential_density(velocities, v_0, rho_0)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['exponential'] = parse_exponential
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (
float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
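# Worked sketch of the 'log_lsun' branch above (hypothetical values; assumes
# parse_quantity, defined earlier in this module, accepts strings like
# '13 day').
def _example_parse_supernova_section():
    supernova_dict = {'luminosity_requested': '9.44 log_lsun',
                      'time_explosion': '13 day'}
    config = parse_supernova_section(supernova_dict)
    # 10 ** (9.44 + log10(L_sun in erg/s)) is roughly 1e43 erg/s
    return config['luminosity_requested']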
def parse_spectrum_list2dict(spectrum_list):
"""
Parse the spectrum list [start, stop, num] to a list
"""
if spectrum_list[0].unit.physical_type != 'length' and \
spectrum_list[1].unit.physical_type != 'length':
raise ValueError('start and end of spectrum need to be a length')
spectrum_config_dict = {}
spectrum_config_dict['start'] = spectrum_list[0]
spectrum_config_dict['end'] = spectrum_list[1]
spectrum_config_dict['bins'] = spectrum_list[2]
spectrum_frequency = np.linspace(
spectrum_config_dict['end'].to('Hz', u.spectral()),
spectrum_config_dict['start'].to('Hz', u.spectral()),
num=spectrum_config_dict['bins'] + 1)
spectrum_config_dict['frequency'] = spectrum_frequency
return spectrum_config_dict
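# Illustrative call (hypothetical values): [start, stop, bins] given as
# wavelengths. Note that 'end' and 'start' are deliberately swapped in the
# np.linspace call above, because the long-wavelength end maps to the lowest
# frequency; bins + 1 points are generated so that 'frequency' holds the bin
# edges rather than the bin centers.
def _example_spectrum_list2dict():
    spectrum_list = [500 * u.nm, 2000 * u.nm, 1000]
    return parse_spectrum_list2dict(spectrum_list)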
def parse_convergence_section(convergence_section_dict):
"""
Parse the convergence section dictionary
Parameters
----------
convergence_section_dict: ~dict
dictionary
"""
convergence_parameters = ['damping_constant', 'threshold', 'fraction',
'hold_iterations']
for convergence_variable in ['t_inner', 't_rad', 'w']:
if convergence_variable not in convergence_section_dict:
convergence_section_dict[convergence_variable] = {}
convergence_variable_section = convergence_section_dict[convergence_variable]
for param in convergence_parameters:
if convergence_variable_section.get(param, None) is None:
if param in convergence_section_dict:
convergence_section_dict[convergence_variable][param] = (
convergence_section_dict[param])
return convergence_section_dict
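# Sketch of how top-level convergence defaults propagate (illustrative
# values): a parameter given at the section level is copied into each of the
# 't_inner', 't_rad' and 'w' sub-sections unless the sub-section overrides it.
def _example_parse_convergence_section():
    section = {'damping_constant': 0.5,
               't_inner': {'damping_constant': 1.0}}
    parsed = parse_convergence_section(section)
    # parsed['t_inner']['damping_constant'] == 1.0  (explicit override kept)
    # parsed['t_rad']['damping_constant'] == 0.5    (inherited from top level)
    return parsed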
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
    Generate densities from the fit to W7 in Branch 85, page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
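# Note (illustrative): the fit above is a pure power law in velocity,
# rho(v) = density_coefficient * (v / 1e5 cm/s) ** -7, i.e. v ** -7 with the
# velocity expressed in km/s, which calculate_density_after_time then rescales
# for homologous expansion from time_0 to time_explosion; the innermost point
# is dropped via densities[1:].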
class ConfigurationNameSpace(dict):
"""
    The configuration name space class wraps a dictionary and adds utility
    functions for easy access; accesses like a.b.c are then possible.
Code from http://goo.gl/KIaq8I
Parameters
----------
config_dict: ~dict
configuration dictionary
Returns
-------
config_ns: ConfigurationNameSpace
"""
@classmethod
def from_yaml(cls, fname):
"""
Read a configuration from a YAML file
Parameters
----------
fname: str
filename or path
"""
try:
            yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
return cls.from_config_dict(yaml_dict)
@classmethod
def from_config_dict(cls, config_dict, config_definition_file=None):
"""
Validating a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
return cls(ConfigurationValidator(config_definition,
config_dict).get_config())
marker = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
            raise TypeError('expected dict')
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value,
ConfigurationNameSpace):
value = ConfigurationNameSpace(value)
if key in self and hasattr(self[key], 'unit'):
value = u.Quantity(value, self[key].unit)
dict.__setitem__(self, key, value)
def __getitem__(self, key):
return super(ConfigurationNameSpace, self).__getitem__(key)
def __getattr__(self, item):
if item in self:
return self[item]
else:
            return super(ConfigurationNameSpace, self).__getattribute__(item)
__setattr__ = __setitem__
def __dir__(self):
return self.keys()
def get_config_item(self, config_item_string):
"""
Get configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
config_item = config_item_path[0]
if config_item.startswith('item'):
return self[config_item_path[0]]
else:
return self[config_item]
elif len(config_item_path) == 2 and\
config_item_path[1].startswith('item'):
return self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
else:
return self[config_item_path[0]].get_config_item(
'.'.join(config_item_path[1:]))
def set_config_item(self, config_item_string, value):
"""
set configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
        value:
            value to set the parameter to
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
self[config_item_path[0]] = value
elif len(config_item_path) == 2 and \
config_item_path[1].startswith('item'):
current_value = self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
if hasattr(current_value, 'unit'):
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] =\
u.Quantity(value, current_value.unit)
else:
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] = value
else:
self[config_item_path[0]].set_config_item(
'.'.join(config_item_path[1:]), value)
def deepcopy(self):
return ConfigurationNameSpace(copy.deepcopy(dict(self)))
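# A small usage sketch for ConfigurationNameSpace (illustrative values only):
def _example_configuration_namespace():
    config = ConfigurationNameSpace({'plasma': {'initial_t_inner': 9000}})
    # nested dicts are wrapped on assignment, so attribute-style access works
    assert config.plasma.initial_t_inner == 9000
    # dotted-path helpers walk nested sections
    config.set_config_item('plasma.initial_t_inner', 10000)
    return config.get_config_item('plasma.initial_t_inner')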
class Configuration(ConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
            raise ConfigurationError('Currently only tardis_config_version v1.0 is supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
config_definition_file=None, validate=True):
"""
Validating and subsequently parsing a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
atom_data: ~tardis.atomic.AtomData
            atom data object. If `None`, it will be read from the atom
            data file path in the config_dict [default=None]
test_parser: ~bool
switch on to ignore a working atom_data, mainly useful for
testing this reader
config_definition_file: ~str
path to config definition file, if `None` will be set to the default
in the `data` directory that ships with TARDIS
validate: ~bool
Turn validation on or off.
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
if validate:
validated_config_dict = ConfigurationValidator(config_definition,
config_dict).get_config()
else:
validated_config_dict = config_dict
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in validated_config_dict.keys():
atom_data_fname = validated_config_dict['atom_data']
validated_config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
validated_config_dict['supernova']['luminosity_nu_start'] = \
validated_config_dict['supernova']['luminosity_wavelength_end'].to(
u.Hz, u.spectral())
try:
validated_config_dict['supernova']['luminosity_nu_end'] = \
(validated_config_dict['supernova']
['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
except ZeroDivisionError:
validated_config_dict['supernova']['luminosity_nu_end'] = (
np.inf * u.Hz)
validated_config_dict['supernova']['time_explosion'] = (
validated_config_dict['supernova']['time_explosion'].cgs)
validated_config_dict['supernova']['luminosity_requested'] = (
validated_config_dict['supernova']['luminosity_requested'].cgs)
#Parsing the model section
model_section = validated_config_dict['model']
v_inner = None
v_outer = None
mean_densities = None
abundances = None
structure_section = model_section['structure']
if structure_section['type'] == 'specific':
start, stop, num = model_section['structure']['velocity']
num += 1
velocities = np.linspace(start, stop, num)
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(
model_section['structure']['density'], v_inner, v_outer,
validated_config_dict['supernova']['time_explosion']).cgs
elif structure_section['type'] == 'file':
v_inner, v_outer, mean_densities, inner_boundary_index, \
outer_boundary_index = read_density_file(
structure_section['filename'], structure_section['filetype'],
validated_config_dict['supernova']['time_explosion'],
structure_section['v_inner_boundary'],
structure_section['v_outer_boundary'])
r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_validated_config_dict = {}
structure_section['v_inner'] = v_inner.cgs
structure_section['v_outer'] = v_outer.cgs
structure_section['mean_densities'] = mean_densities.cgs
no_of_shells = len(v_inner)
structure_section['no_of_shells'] = no_of_shells
structure_section['r_inner'] = r_inner.cgs
structure_section['r_outer'] = r_outer.cgs
structure_section['r_middle'] = r_middle.cgs
structure_section['volumes'] = ((4. / 3) * np.pi * \
(r_outer ** 3 -
r_inner ** 3)).cgs
#### TODO the following is legacy code and should be removed
validated_config_dict['structure'] = \
validated_config_dict['model']['structure']
# ^^^^^^^^^^^^^^^^
abundances_section = model_section['abundances']
if abundances_section['type'] == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
if element_symbol_string == 'type': continue
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_section['type'] == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have '
                                         'the same number of cells as the specified '
                                         'density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
validated_config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = validated_config_dict['plasma']
if plasma_section['initial_t_inner'] < 0.0 * u.K:
luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
plasma_section['t_inner'] = ((luminosity_requested /
(4 * np.pi * r_inner[0] ** 2 *
constants.sigma_sb)) ** .25).to('K')
logger.info('"initial_t_inner" is not specified in the plasma '
'section - initializing to %s with given luminosity',
plasma_section['t_inner'])
else:
plasma_section['t_inner'] = plasma_section['initial_t_inner']
plasma_section['t_rads'] = np.ones(no_of_shells) * \
plasma_section['initial_t_rad']
if plasma_section['disable_electron_scattering'] is False:
logger.debug("Electron scattering switched on")
validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
else:
            logger.warning('Disabling electron scattering - this is not physical')
validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)
##### NLTE subsection of Plasma start
nlte_validated_config_dict = {}
nlte_species = []
nlte_section = plasma_section['nlte']
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_validated_config_dict['species'] = nlte_species
nlte_validated_config_dict['species_string'] = nlte_species_list
nlte_validated_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
            logger.warning('No "species" given - ignoring other NLTE options '
                           'given:\n%s', pp.pformat(nlte_section))
if not nlte_validated_config_dict:
nlte_validated_config_dict['species'] = []
plasma_section['nlte'] = nlte_validated_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = validated_config_dict['montecarlo']
if montecarlo_section['last_no_of_packets'] < 0:
montecarlo_section['last_no_of_packets'] = \
montecarlo_section['no_of_packets']
default_convergence_section = {'type': 'damped',
'lock_t_inner_cycles': 1,
't_inner_update_exponent': -0.5,
'damping_constant': 0.5}
if montecarlo_section['convergence_strategy'] is None:
logger.warning('No convergence criteria selected - '
'just damping by 0.5 for w, t_rad and t_inner')
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(default_convergence_section))
else:
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(
montecarlo_section['convergence_strategy']))
black_body_section = montecarlo_section['black_body_sampling']
montecarlo_section['black_body_sampling'] = {}
montecarlo_section['black_body_sampling']['start'] = \
black_body_section[0]
montecarlo_section['black_body_sampling']['end'] = \
black_body_section[1]
montecarlo_section['black_body_sampling']['samples'] = \
black_body_section[2]
###### END of convergence section reading
validated_config_dict['spectrum'] = parse_spectrum_list2dict(
validated_config_dict['spectrum'])
return cls(validated_config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(Configuration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
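# Typical entry point (illustrative; the file name is hypothetical):
#
#     config = Configuration.from_yaml('tardis_example.yml')
#
# This validates the YAML against the bundled definition file and returns a
# fully parsed Configuration with dotted-attribute access, e.g.
# config.supernova.time_explosion.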
| bsd-3-clause |
felipemontefuscolo/bitme | get_bitmex_candles.py | 1 | 4122 | #!/usr/bin/env python
import sys
import time
import swagger_client
from swagger_client.rest import ApiException
from utils.utils import smart_open
import argparse
import pandas as pd
MAX_NUM_CANDLES_BITMEX = 500
def print_file(file_or_stdout, api_instance, bin_size, partial, symbol, reverse, start_time, end_time):
chunks = split_in_chunks(start_time, end_time, MAX_NUM_CANDLES_BITMEX, bin_size)
with smart_open(file_or_stdout) as fh:
print("time,open,high,low,close,volume", file=fh)
num_pages = len(chunks)
for i in range(num_pages):
chunk = chunks[i]
s = chunk[0]
e = chunk[1]
count = (e - s) / pd.Timedelta(bin_size)
page = api_instance.trade_get_bucketed(
bin_size=bin_size,
partial=partial,
symbol=symbol,
count=count,
start=0.0,
reverse=reverse,
start_time=s,
end_time=e)
print("from {} to {}: {} candles downloaded".format(s, e, len(page)))
# TODO: bitmex has a bug where the high is not the highest value !!!!!
for line in reversed(page):
print(','.join([line.timestamp.strftime('%Y-%m-%dT%H:%M:%S'),
str(line.open),
str(max(line.high, line.open)),
str(min(line.low, line.open)),
str(line.close),
str(line.volume)]), file=fh)
sys.stdout.write(
"progress: completed %d out of %d pages (%.2f%%) \r" %
(i + 1, num_pages, 100 * float(i + 1) / num_pages))
sys.stdout.flush()
time.sleep(1.001)
print("")
def split_in_chunks(start: pd.Timestamp, end: pd.Timestamp, chunk_size: int, bucket_size: str):
i = start
r = []
dt = chunk_size * pd.Timedelta(bucket_size)
while i <= end:
r += [(i, min(end, i + dt))]
i += dt
return r
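# Quick sketch (illustrative): one day of 1m candles split into 500-candle
# request windows yields ceil(1440 / 500) = 3 chunks.
def _example_split_in_chunks():
    start = pd.Timestamp('2018-04-01T00:00:00')
    end = pd.Timestamp('2018-04-02T00:00:00')
    return split_in_chunks(start, end, MAX_NUM_CANDLES_BITMEX, '1m')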
def get_args(args=None, namespace=None):
parser = argparse.ArgumentParser(description="Get bitmex data")
parser.add_argument('-b', '--begin-time', type=pd.Timestamp, required=True, help="Example: '2018-04-01T00:00:01'")
parser.add_argument('-e', '--end-time', type=pd.Timestamp, required=True, help="Example: '2018-04-02T00:00:01'")
parser.add_argument('-s', '--symbol', type=str, default='XBTUSD',
                        help='Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring '
'contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. '
'Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`. (optional)')
parser.add_argument('-z', '--bin-size', choices=('1m', '5m', '1h', '1d'), default='1m', type=str,
help='Time interval to bucket by')
parser.add_argument('-o', '--file-or-stdout', type=str, required=True, help='Output filename or "-" for stdout')
    parser.add_argument('--partial', action='store_true', default=False)
args = parser.parse_args(args, namespace)
return args
def main():
args = get_args()
# create an instance of the API class
configuration = swagger_client.Configuration()
configuration.host = 'https://www.bitmex.com/api/v1'
api_instance = swagger_client.TradeApi(swagger_client.ApiClient(configuration))
    print("print to file " + (args.file_or_stdout if args.file_or_stdout != '-' else 'std output'))
try:
print_file(file_or_stdout=args.file_or_stdout,
api_instance=api_instance,
bin_size=args.bin_size, partial=args.partial, symbol=args.symbol,
reverse=False,
start_time=args.begin_time, end_time=args.end_time)
except ApiException as e:
print("Exception when calling TradeApi->trade_get_bucketed: %s\n" % e)
return 0
if __name__ == "__main__":
sys.exit(main())
| mpl-2.0 |
alexeyum/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id,
    this function will choose between integer id lookup and metadata name
    lookup by looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
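# A minimal usage sketch (the environment path is hypothetical; the dataset
# name matches the one used in old scikit-learn MLComp examples):
#
# >>> import os
# >>> os.environ['MLCOMP_DATASETS_HOME'] = '/path/to/mlcomp'  # doctest: +SKIP
# >>> dataset = load_mlcomp('20news-18828', set_='train')  # doctest: +SKIP
# >>> len(dataset.filenames)  # doctest: +SKIP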
| bsd-3-clause |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/wide_n_deep_tutorial.py | 18 | 8111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
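# For example (illustrative): with the boundaries above, the 10 cut points
# produce 11 buckets, including (-inf, 18) and [65, inf); an age of 34 falls
# into [30, 35) and is one-hot encoded as bucket index 3.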
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
    print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
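# Illustrative call (hypothetical path): the return value is itself the input
# function the Estimator invokes, e.g.
#   m.train(input_fn=input_fn('/tmp/adult.data', num_epochs=None,
#                             shuffle=True))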
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
# Specify file path below if want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
  # set num_epochs to None to get an infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
  # set steps to None to run evaluation until all data is consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
andyh616/mne-python | mne/tests/test_epochs.py | 1 | 71695 | # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
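# Threshold units follow the channel types (T/m for gradiometers, T for
# magnetometers, V for EEG/EOG); ``flat`` drops epochs whose peak-to-peak
# amplitude stays *below* the given values.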
clean_warning_registry() # really clean warning stack
def test_reject():
"""Test epochs rejection
"""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad_epochs(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad_epochs,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad_epochs(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
def test_decim():
"""Test epochs decimation
"""
# First with EpochsArray
n_epochs, n_channels, n_times = 5, 10, 20
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
sfreq = 1000.
sfreq_new = sfreq / decim
data = np.random.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.decimate(decim, copy=True).get_data()
data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs, data_epochs_2)
# Now let's do it with some real data
raw, events, picks = _get_data()
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=False)
assert_raises(ValueError, epochs.decimate, -1)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload)
epochs.preload_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
# events with non integers
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
epochs_no_bl.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_no_bl_read.apply_baseline(baseline)
assert_true(epochs_no_bl_read.baseline == baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(), epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax, picks=picks,
on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.preload_data()
epochs.crop(0, 0, copy=False)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_equal(len(epochs_read.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
assert_true(epochs_resampled is epochs)
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
    assert_equal(caught, 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names == (ind if isinstance(ind, list)
                                       else [ind]))
    # test that non-indexed data were present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection
"""
raw, events, picks = _get_data()
events = events[:10]
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for preload in (True, False):
for proj in (True, False, 'delayed'):
for decim in (1, 3):
for ii in range(2):
epochs = Epochs(raw, events, event_id, tmin, tmax,
picks=picks, proj=proj, reject=reject,
preload=preload, decim=decim)
if ii == 1:
epochs.preload_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
if proj is True:
evoked.apply_proj()
epochs_data = epochs.get_data().mean(axis=0)[picks_data]
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
assert_allclose(evoked.data, epochs_data,
rtol=1e-5, atol=1e-15)
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
            # In the preload case you cannot know the bads if they were
            # already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info['ch_names'].pop(0)
epochs_meg.info['nchan'] -= 1
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['experimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
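    # MNE events are (n_events, 3) integer arrays: sample index, previous
    # trigger value, and event id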
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0., baseline=None)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
tmin=-0.2, baseline=(None, 0))
def test_concatenate_epochs():
"""Test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types
"""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
epoch_meg = epoch.pick_types(meg=True, copy=True)
epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
    assert_true(all(ch in epoch_new.ch_names for ch in epoch.ch_names))
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
run_tests_if_main()
| bsd-3-clause |
ssh0/growing-string | triangular_lattice/diecutting/result_count_on_edge.py | 1 | 9360 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-16
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
import set_data_path
class Visualizer(object):
def __init__(self, subjects):
self.data_path_list = set_data_path.data_path
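        # dispatch each requested subject name to the matching
        # self.result_<subject>() plotting method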
if len(subjects) != 0:
for subject in subjects:
getattr(self, 'result_' + subject)()
def load_data(self, _path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
if len(size_dist_ave) == 0:
raise KeyError
return self.load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])
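        # number of sites in a hexagonal region of radius L on the
        # triangular lattice: 1 + 6 * (1 + 2 + ... + L) = 3 * L * (L + 1) + 1
        # (e.g. L=1 -> 7 sites, L=2 -> 19 sites)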
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def load_data_averaged(self, _path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
# size_dist = data['size_dist']
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_all = N_all
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n_all = 6 * Ls[1:]
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def result_N(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Occupied points in the cutting region' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N$')
plt.show()
def result_N_minus_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N_minus_rate[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('The fraction of unoccupied sites among all N' +
                     ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N_{-1} / N_{\mathrm{all}}$')
plt.show()
def result_n0(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n0, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of sites that are the only members of \
a subcluster on the cutting edges.' +
                     ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{0}$')
plt.show()
def result_n1(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n1, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of sites that are connected to an \
existing subcluster on the cutting edges.' +
                     ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{1}$')
plt.show()
def result_n2(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n2, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of sites on the cutting edges that \
are connected to two neighbors.' +
                     ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
def result_n_minus(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n_minus, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of sites that are not occupied on \
the cutting edges.' +
                     ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{-1}$')
plt.show()
def result_S(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
        ax.set_title('Averaged number of the subclusters in the cut region.'
                     + ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
def result_S_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
# ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
# ax.plot(self.Ls[1:], self.S[1:] / self.n_all, '.',
ax.plot(self.Ls[1:], self.S[1:] / self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
        ax.set_title('Averaged number of the subclusters in the cut region'
                     + ' (normalized)'
                     + ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
if __name__ == '__main__':
# subject: 'N', 'N_minus_rate', 'n0', 'n1', 'n2', 'n_minus', 'S'
main = Visualizer(
[
# 'N',
# 'N_minus_rate',
# 'n0',
# 'n1',
# 'n2',
# 'n_minus',
'S',
# 'S_rate'
]
)
| mit |
sumspr/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
riddlezyc/geolab | src/structure/Z.py | 1 | 1474 | # -*- coding: utf-8 -*-
# from framesplit import trajectory
# too slow using this module
import matplotlib.pyplot as plt
dirName = r"F:\simulations\asphaltenes\na-mont\TMBO-oil\water\373-continue/"
xyzName = 'all.xyz'
hetero = 'O' # 'oh' 'N' 'sp' 'O' 'Np' 'sp'
with open(dirName + xyzName, 'r') as foo:
coords = foo.readlines()
nAtoms = int(coords[0])
nFrames = int(len(coords) / (nAtoms + 2))
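# each frame of a concatenated .xyz trajectory spans (nAtoms + 2) lines:
# an atom-count line, a comment line, then one line per atom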
pos = []
for i in range(nFrames):
istart = i * (nAtoms + 2)
iend = (i + 1) * (nAtoms + 2)
pos.append(coords[istart:iend])
# for i in range(200):
# print coords[i]
heteroatom = 0
# all of my molecules have atoms less than 200
for i in range(200):
x = pos[0][i].split()[0]
if x == hetero:
heteroatom = i
break
heteroZ = []
for p in pos:
# print p[heteroatom].split()[0]
zx = float(p[heteroatom].split()[3])
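    # if the atom sits below z = 10 it presumably crossed the periodic
    # boundary, so shift it up by one box length (assumed ~80 A here)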
if zx < 10:
zx = zx + 80
heteroZ.append(zx)
with open(dirName + 'heteroZ.dat', 'w') as foo:
for i, z in enumerate(heteroZ):
print >> foo, "%3d %8.5f" % (i, z)
# z-position plot
plt.figure(0, figsize=(8, 4))
figName = dirName + 'heteroZ.png'
plt.title('z of heteroatom', fontsize=20)
plt.plot(range(len(heteroZ)-1), heteroZ[1:], linewidth=2)
plt.grid(True)
plt.xlabel('steps')
plt.ylabel('Z')
plt.axis([0, len(heteroZ)*1.1, 0, max(heteroZ)*1.1])
plt.savefig(figName, format='png', dpi=300)
plt.close()
| gpl-3.0 |
nmartensen/pandas | pandas/io/sql.py | 3 | 58612 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
    If it is not None, a FutureWarning is issued for 'sqlite' and a
    ValueError is raised for anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
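# a quick sketch of the conversion (hypothetical queries):
#   _convert_params("SELECT * FROM t WHERE a = ?", [1])
#       -> ["SELECT * FROM t WHERE a = ?", [1]]
#   _convert_params("SELECT * FROM t WHERE a = :a", {"a": 1})
#       -> ["SELECT * FROM t WHERE a = :a", {"a": 1}]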
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
    Force the columns named in ``parse_dates`` to be parsed as datetimes.
    Supports both string-formatted and integer timestamp columns.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
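    Examples
    --------
    A minimal sketch, assuming a sqlite3 connection ``con`` and a table
    ``my_table`` (both hypothetical)::
        rows = execute('SELECT * FROM my_table WHERE id = ?', con,
                       params=[1])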
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
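    Examples
    --------
    A minimal sketch, assuming a SQLAlchemy engine for a database that
    contains a table ``my_table`` (names are hypothetical)::
        from sqlalchemy import create_engine
        engine = create_engine('sqlite:///example.db')
        df = read_sql_table('my_table', engine, index_col='id')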
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
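# A parameterized-query sketch (names illustrative). sqlite3's paramstyle is
# 'qmark', so placeholders are '?':
#
#     import sqlite3
#     con = sqlite3.connect('company.db')
#     df = read_sql_query('SELECT name, salary FROM employees WHERE salary > ?',
#                         con, params=[50000])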
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
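# Because read_sql delegates on its input, both calls below are equivalent to
# the more specific functions (sketch; reuses the hypothetical 'engine' and
# 'employees' table from the examples above):
#
#     read_sql('employees', engine)                  # -> read_sql_table
#     read_sql('SELECT * FROM employees', engine)    # -> read_sql_query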
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
           installed.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
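# A writing sketch with an explicit SQLAlchemy dtype (names illustrative;
# assumes the hypothetical 'engine' connectable from above):
#
#     from sqlalchemy.types import Text
#     to_sql(DataFrame({'code': ['a1', 'b2']}), 'codes', engine,
#            if_exists='replace', index=False, dtype={'code': Text})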
def has_table(table_name, con, flavor=None, schema=None):
"""
    Check if the database has the named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
    Returns a SQLAlchemy engine from a URI (if con is a string),
    otherwise returns con unmodified.
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
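# Dispatch sketch: a URI string or SQLAlchemy connectable yields SQLDatabase,
# while a raw DBAPI2 connection falls back to SQLiteDatabase (assumes
# sqlalchemy is importable for the first case):
#
#     pandasSQL_builder('sqlite:///company.db')         # -> SQLDatabase
#     pandasSQL_builder(sqlite3.connect('company.db'))  # -> SQLiteDatabase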
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isna(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
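    # Chunk arithmetic in insert(), illustrated: with nrows=10 and chunksize=4
    # the loop plans int(10 / 4) + 1 = 3 chunks covering rows [0:4], [4:8] and
    # [8:10]; the start_i >= end_i guard stops early when chunksize divides
    # nrows exactly (e.g. nrows=8, chunksize=4 yields only two real chunks).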
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
            for idx in self.index[::-1]:
                cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach to a new MetaData object; only attach to
        # self.meta once the table has actually been created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with false. Therefore we only convert bools if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notna_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnadata = col[~isna(col)]
if len(notnadata):
col_for_inference = notnadata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notna_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
            except AttributeError:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle the database abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
            # check for potential case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ----
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
            for idx in self.index[::-1]:
                names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notna_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
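# Mapping sketch for _sql_type_name (values come from _SQL_TYPES above): a
# float64 column maps to 'REAL', int64 to 'INTEGER', datetime64 to 'TIMESTAMP',
# and any inferred type without an entry falls back to 'TEXT' via 'string'.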
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
        except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
            if isinstance(data, tuple):
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
        columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
| bsd-3-clause |
bmazin/SDR | Projects/ChannelizerSim/legacy/bin_width_1st_stage.py | 1 | 1524 |
import matplotlib.pyplot as plt
import scipy.signal
import numpy as np
import math
import random
from matplotlib.backends.backend_pdf import PdfPages
samples = 51200
L = samples/512
fs = 512e6
dt = 1/fs
time = [i*dt for i in range(samples)]
def pfb_fir(x):
N = len(x)
T = 4
L = 512
bin_width_scale = 2.5
dx = T*math.pi/L/T
X = np.array([n*dx-T*math.pi/2 for n in range(T*L)])
coeff = np.sinc(bin_width_scale*X/math.pi)*np.hanning(T*L)
y = np.array([0+0j]*(N-T*L))
for n in range((T-1)*L, N):
m = n%L
coeff_sub = coeff[L*T-m::-L]
y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum()
return y
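# pfb_fir is a 4-tap, 512-branch polyphase FIR front end: each output sample
# sums T=4 input samples spaced L=512 apart, weighted by a Hanning-windowed
# sinc whose main lobe is widened by bin_width_scale. Taking a 512-point FFT
# of its output (as done below) measures the response of one channelizer bin.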
R = 100/5
#freqs = [i*1e5 + 6.0e6 for i in range(R)]
freqs = [i*5e4 + 6.0e6 for i in range(R*8)]
bin = []
bin_pfb = []
for f in freqs:
print f
signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time])
y = pfb_fir(signal)
bin_pfb.append(np.fft.fft(y[0:512])[10])
bin = np.array(bin)
bin_pfb = np.array(bin_pfb)
freqs = np.array(freqs)/1e6
b = scipy.signal.firwin(20, cutoff=0.125, window="hanning")
w,h = scipy.signal.freqz(b,1, 4*R, whole=1)
h = np.array(h[2*R:4*R].tolist()+h[0:2*R].tolist())
#h = np.array(h[20:40].tolist()+h[0:20].tolist())
fig = plt.figure()
ax0 = fig.add_subplot(111)
#ax0.plot(freqs, abs(fir9), '.', freqs, abs(fir10), '.', freqs, abs(fir11), '.')
ax0.plot(freqs, 10*np.log10(abs(bin_pfb)/512), 'k-')
ax0.set_xlabel('Frequency (MHz)')
ax0.set_ylabel('Gain (dB)')
ax0.set_ylim((-50,0))
plt.show()
#ax0.axvline(x = 10, linewidth=1, color='k')
| gpl-2.0 |
arrow-/simQuad | ground_station/gyro_scope.py | 2 | 5471 | '''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
IMPORTANT!!
It is suggested you run this script with mpu_level2.ino first to see and understand
its operation.
Basically this script EXPECTS:
Arduino is providing space separated gyro readings @ ~5ms intervals (via MPU Interrupt).
* Each serial packet must be ASCII and look like:
[x_gyro]<space>[y_gyro]<space>[z_gyro]<newline>
+ You need to specify correct Serial port
+ You need to set the Y-limits of the plot axis.
+ You need to use correct value of "dt".
+ You need to set the correct conversion factor for Gyro readings.
      Mode     0      1      2       3
      Range    +-250  +-500  +-1000  +-2000
      Conv.    131    65.5   32.75   16.375
AND it DELIVERS:
* 3 axis loss-less Gyro readings plot (almost real time).
* 3D visualisation of current orientation based on gyro vals
If you want to just plot data in ~real time use {oscilloscope.py}.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import serial, time
def rotate(v, axis, theta):
'''
Rotates "v" vector about "axis" vector by "theta" radians, returns vector
'''
c = np.cos(theta)
s = np.sin(theta)
t = 1-c
mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s],
[axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s],
[axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ])
return mat.dot(v.T)
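# rotate() implements the axis-angle (Rodrigues) rotation matrix. A quick
# sanity check (sketch): rotating x-hat about z-hat by 90 degrees yields y-hat,
#
#     rotate(np.array([1., 0., 0.]), np.array([0., 0., 1.]), np.pi/2)
#     # -> approximately [0., 1., 0.]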
def calcPose(omega):
'''
Helper function. Finds the "d"-theta, then calls rotate.
Omega must be in ** degrees per second **
'''
theta = omega*dt*np.pi/180 #theta is "d-theta" in radians
rpy[1] = rotate(rpy[1], rpy[0], theta[0])
rpy[0] = rotate(rpy[0], rpy[1], theta[1])
rpy[2] = np.cross(rpy[0], rpy[1])
rpy[1] = rotate(rpy[1], rpy[2], theta[2])
rpy[0] = rotate(rpy[0], rpy[2], theta[2])
plt.ion()
# SET CORRECT PORT NUM HERE
arduino = serial.Serial('/dev/ttyACM0', 57600)
# dt is found experimentally. Contact Ananya for details. Basically this is the
# time between 2 MPU (gyro) interrupts. The np.pi/180 converts deg/sec to rad/sec.
# SET CORRECT dt HERE. TIME IN SECONDS BETWEEN TWO SENSOR PACKETS AS RECVD. BY ARDUINO.
dt = .005 # 5msec
# rpy is original orientation. These vectors are updated by calcPose()
rpy = np.eye(3)
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(121)
a3d = fig.add_subplot(122, projection='3d')
a3d.set_xlim(-1.2,1.2)
a3d.set_ylim(-1.2,1.2)
a3d.set_zlim(-1.2,1.2)
a3d.scatter([0], [0], [0], s=40)
r, = a3d.plot([0,1], [0,0], [0,0], lw=2, c='black')
p, = a3d.plot([0,0], [0,1], [0,0], lw=2, c='red')
a3d.plot([0,2], [0,0], [0,0], c='cyan')
a3d.plot([0,0], [0,2], [0,0], c='brown')
a3d.plot([0,0], [0,0], [0,2], c='green')
a3d.plot([0,-2], [0,0], [0,0], ls='--', c='cyan')
a3d.plot([0,0], [0,-2], [0,0], ls='--', c='brown')
a3d.plot([0,0], [0,0], [0,-2], ls='--', c='green')
num_samples = 0
buff = 0
# "buff" counts till 50. Every time it reaches fifty, plt.draw() is called, since
# plt.draw() is a costly operation. Normal list append and pose calculations are fast.
# So, do those diligently, for every sample, but update display
# rarely (while ensuring smooth animation).
gyro_x = [0]
gyro_y = [0] # gyro data lists. I use them like queues.
gyro_z = [0]
t = [0]
# scopes is a list of 3 matplotlib.Line_2D objects.
scopes = [axes.plot(t, gyro_x, label=r'$\omega_x$')[0], axes.plot(t, gyro_y, label=r'$\omega_y$')[0], axes.plot(t, gyro_z, label=r'$\omega_z$')[0]]
axes.legend(prop=dict(size=14))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
axes.set_ylim(-505, 505) # SET CORRECT Y-LIM HERE
conversion = 65.5 #Gyro 500 SET CORRECT CONV FACTOR HERE
# Refer to the datasheet. Convert the ADC result into a physical measurement.
# If you don't understand this, pls. leave project.
print 'Me Ready'
time.sleep(2)
#Handshake MAY BE REDUNDANT
print arduino.inWaiting()
arduino.flushInput()
arduino.write('e')
print 'Sent Request...'
data = [0]*6
while True:
try:
num = arduino.read(12)
num = [ord(x) for x in num]
    except Exception:
        print 'Serial error!'
        raise RuntimeError('failed to read 12 bytes from the serial port')
    _ind = 0  # index into data[]; advanced inside the loop below
for i in range(0,12, 2):
data[_ind] = (num[i]<<8)|num[i+1]
if data[_ind] & 0x8000:
data[_ind] = data[_ind] - 0x10000
_ind += 1
#print data[3:]
datas = np.array([float(data[3])/conversion, float(data[4])/conversion, float(data[5])/conversion])
gyro_x.append(datas[0])
gyro_y.append(datas[1])
gyro_z.append(datas[2])
num_samples += 1
t.append(num_samples)
calcPose(datas) #This function updates the global variable: "rpy"
if num_samples>200:
del t[0]
del gyro_x[0]
del gyro_y[0]
del gyro_z[0]
axes.set_xlim(t[0], num_samples)
scopes[0].set_data(t, gyro_x)
scopes[1].set_data(t, gyro_y)
scopes[2].set_data(t, gyro_z)
# pose matrix is just an easier way of giving input to the .set_data()
# and .set_3d_properties() methods. You see, line needs 2 (end) points:
# the rpy entries AND THE ORIGIN. pose matrix does just that: specifies
# BOTH end points.
pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T])
r.set_data(pose[0][:2])
r.set_3d_properties(pose[0][2])
p.set_data(pose[1][:2])
p.set_3d_properties(pose[1][2])
if buff>25:
buff=0
plt.draw()
buff += 1
plt.ioff()
plt.show() | gpl-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/sql.py | 7 | 58343 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
    If not None, produces a FutureWarning if it is 'sqlite' and
    raises a ValueError for anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
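# Behavior sketch for _handle_date_column: a dict format is forwarded to
# to_datetime as keyword arguments (with errors='ignore'); unit strings such as
# 's' treat numeric columns as integer timestamps; anything else is parsed as a
# strftime-style string. All paths except the dict one coerce the result to UTC.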
def _parse_date_columns(data_frame, parse_dates):
"""
    Force columns that were not returned as datetime to be parsed as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
    except Exception:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
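# A minimal sketch of the delegation described in the Notes above (names are
# hypothetical):
#
#   >>> read_sql('trades', engine)          # bare table name -> read_table
#   >>> read_sql('SELECT 1', engine)        # SQL string -> read_query
#   >>> read_sql('SELECT 1', sqlite_conn)   # DBAPI2 fallback -> read_query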
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
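# Illustrative round trip through the two functions above (a sketch with
# hypothetical names, not a test):
#
#   >>> import sqlite3
#   >>> from pandas import DataFrame
#   >>> conn = sqlite3.connect(':memory:')
#   >>> to_sql(DataFrame({'a': [1, 2, 3]}), 'demo', conn, index=False)
#   >>> read_sql('SELECT * FROM demo', conn)['a'].tolist()
#   [1, 2, 3]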
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
    table_name : string
        Name of SQL table
    con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
    Returns a SQLAlchemy engine from a URI (if con is a string),
    otherwise it just returns con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
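# Sketch of the URI handling above (the connection string is hypothetical):
#
#   >>> _engine_builder('sqlite:///tmp.db')   # str -> sqlalchemy Engine
#   >>> _engine_builder(engine)               # non-str -> returned unchanged
#
# If sqlalchemy cannot be imported, the string is returned as-is and
# pandasSQL_builder below raises ImportError.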
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isnull(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
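    # Worked example of the chunking arithmetic above (illustrative):
    # nrows=10, chunksize=3 -> chunks = int(10 / 3) + 1 = 4, giving slices
    # [0:3], [3:6], [6:9], [9:10]. When nrows is an exact multiple
    # (nrows=9), the fourth iteration hits start_i >= end_i and breaks.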
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
                for idx in self.index[::-1]:
                    cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
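    # Illustrative outcomes of the rules above: index=True with an unnamed
    # single-level index -> ['index']; a two-level index with names
    # ['a', None] -> ['a', 'level_1']; index='date' (reading) -> ['date'].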
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with False. Therefore only convert bools if there are no
        NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
            # only mark the column timezone-aware when tz info is present
            if getattr(col, 'tzinfo', None) is not None:
                return DateTime(timezone=True)
            return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
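    # Sketch of the resulting dtype mapping (derived from the rules above):
    # int64 -> BigInteger, int32 -> Integer, float64 -> Float(precision=53),
    # float32 -> Float(precision=23), bool -> Boolean, datetime64 ->
    # DateTime, everything else (e.g. strings) -> Text.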
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases,
    using SQLAlchemy to handle the database abstraction
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. psycopg2 uses %(name)s, so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
            # check for potential case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
        # Avoid casting double-precision floats into decimals
        from sqlalchemy import Numeric
        if tbl is not None:
            for column in tbl.columns:
                if isinstance(column.type, Numeric):
                    column.type.asdecimal = False
        return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
            for idx in self.index[::-1]:
                names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
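    # For a frame with columns ['a', 'b'] and no index written, the statement
    # built above is (illustrative):
    #   INSERT INTO "t" ("a","b") VALUES (?,?)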
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
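    # Illustrative output for a frame with an integer column 'a' and the
    # default index written (a sketch):
    #   CREATE TABLE "t" (
    #     "index" INTEGER,
    #     "a" INTEGER
    #   )
    #   CREATE INDEX "ix_t_index" ON "t" ("index")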
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
            if isinstance(data, tuple):
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
        frame : DataFrame
        name : string
            Name of SQL table
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
    keys : string or sequence, default: None
        columns to use as primary key
    con : an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
| agpl-3.0 |
amozie/amozie | studzie/keras_gym/mountain_car_v0.py | 1 | 2577 | import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
class MountainCarEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self) -> None:
self.env = gym.make('MountainCar-v0')
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def _step(self, action):
step = self.env.step(action)
step = list(step)
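        # reward shaping (a choice of this script): replace the sparse env
        # reward with |velocity| - 0.05 so building up momentum is rewarded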
step[1] = np.abs(step[0][1]) - 0.05
return tuple(step)
def _reset(self):
return self.env.reset()
def _seed(self, seed=None):
return self.env.seed(seed)
def _render(self, mode='human', close=False):
return self.env.render(mode, close)
def _close(self):
return self.env.close()
env = MountainCarEnv()
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,
enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
state = env.reset()
action = env.action_space.sample()
print(action)
reward_list = []
for i in range(500):
    # act greedily with respect to the learned Q-network
    action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
    state, reward, done, _ = env.step(action)
    reward_list.append(reward)
env.render()
env.render(close=True)
dqn.test(env, nb_episodes=5, visualize=True)
env.render(close=True) | apache-2.0 |
danviv/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
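# e.g. prices [100, 102, 101] -> returns [NaN, 0.02, -0.0098...] (illustrative)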
def recounstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates >= date)[0][0]  # find the first non-expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
        dr = first.dr(date)  # number of days remaining in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
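# Worked example of the roll weighting above (illustrative): with dt=20
# business days in the roll period and dr=15 days remaining, the index holds
# w1 = 100*15/20 = 75% in the front month and w2 = 25% in the second month.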
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
    print Y.head(30)
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
lalitkumarj/NEXT-psych | next/apps/TupleBanditsPureExploration/Dashboard.py | 1 | 3313 | """
TupleBanditsPureExplorationDashboard
author: Nick Glattard, n.glattard@gmail.com
last updated: 4/24/2015
######################################
TupleBanditsPureExplorationDashboard
"""
import json
import numpy
import numpy.random
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from next.utils import utils
from next.apps.AppDashboard import AppDashboard
class TupleBanditsPureExplorationDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def get_app_supported_stats(self):
"""
        Returns a list of dictionaries describing the identifier (stat_id) and
        the params needed when calling getStats
        Expected output (list of dicts, each with fields):
            (string) stat_id : the identifier of the statistic
            (string) description : docstring describing outputs
(list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task'
"""
stat_list = self.get_supported_stats()
stat = {}
stat['stat_id'] = 'most_current_ranking'
stat['description'] = self.most_current_ranking.__doc__
stat['necessary_params'] = ['alg_label']
stat_list.append(stat)
return stat_list
def most_current_ranking(self,app_id,exp_uid,alg_label):
"""
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
        The 'headers' contains a list of dictionaries corresponding to each column of the table with fields 'label' and 'field' where 'label' is the label of the column to be put on top of the table, and 'field' is the name of the field in 'data' that the column corresponds to
Expected output (in dict):
plot_type : 'columnar_table'
headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
(list of dicts with fields) data (each dict is a row, each field is the column for that row):
(int) index : index of target
(int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
"""
alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list')
for algorithm in alg_list:
if algorithm['alg_label'] == alg_label:
alg_id = algorithm['alg_id']
alg_uid = algorithm['alg_uid']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid})
list_of_log_dict = sorted(list_of_log_dict, key=lambda k: k['num_reported_answers'] )
print didSucceed, message
item = list_of_log_dict[-1]
return_dict = {}
return_dict['headers'] = [{'label':'Rank','field':'rank'},{'label':'Target','field':'index'},{'label':'Score','field':'score'},{'label':'Precision','field':'precision'}]
return_dict['data'] = item['targets']
return_dict['plot_type'] = 'columnar_table'
return return_dict
| apache-2.0 |
elijah513/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/patches.py | 6 | 148732 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
import matplotlib.lines as mlines
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
_patch_alias_map = {
'antialiased': ['aa'],
'edgecolor': ['ec'],
'facecolor': ['fc'],
'linewidth': ['lw'],
'linestyle': ['ls']
}
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
# unscaled dashes. Needed to scale dash patterns by lw
self._us_dashes = None
self._linewidth = 0
self.set_fill(fill)
self.set_linestyle(linestyle)
self.set_linewidth(linewidth)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def _process_radius(self, radius):
if radius is not None:
return radius
if cbook.is_numlike(self._picker):
_radius = self._picker
else:
if self.get_edgecolor()[3] == 0:
_radius = 0
else:
_radius = self.get_linewidth()
return _radius
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
radius = self._process_radius(radius)
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
radius = self._process_radius(radius)
return self.get_path().contains_point(point,
self.get_transform(),
radius)
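    # Hedged usage sketch (Circle is defined later in this module; the point
    # is interpreted after the patch's transform is applied):
    #
    #   >>> c = Circle((0, 0), radius=1)
    #   >>> c.contains_point((0, 0))
    #   True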
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
# For some properties we don't need or don't want to go through the
# getters/setters, so we just copy them directly.
self._edgecolor = other._edgecolor
self._facecolor = other._facecolor
self._fill = other._fill
self._hatch = other._hatch
# copy the unscaled dash pattern
self._us_dashes = other._us_dashes
self.set_linewidth(other._linewidth) # also sets dash properties
self.set_transform(other.get_data_transform())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
self.stale = True
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def _set_edgecolor(self, color):
if color is None:
if (mpl.rcParams['patch.force_edgecolor'] or
not self._fill or self._edge_default):
color = mpl.rcParams['patch.edgecolor']
else:
color = 'none'
self._edgecolor = colors.to_rgba(color, self._alpha)
self.stale = True
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, None, 'none', or 'auto'
"""
self._original_edgecolor = color
self._set_edgecolor(color)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def _set_facecolor(self, color):
if color is None:
color = mpl.rcParams['patch.facecolor']
alpha = self._alpha if self._fill else 0
self._facecolor = colors.to_rgba(color, alpha)
self.stale = True
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
self._original_facecolor = color
self._set_facecolor(color)
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha tranparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
# stale is already True
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
if w is None:
w = mpl.rcParams['axes.linewidth']
self._linewidth = float(w)
# scale the dash pattern by the linewidth
offset, ls = self._us_dashes
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink
in points.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) |
``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
Parameters
----------
ls : { '-', '--', '-.', ':'} and more see description
The line style.
"""
if ls is None:
ls = "solid"
self._linestyle = ls
# get the unscalled dash pattern
offset, ls = self._us_dashes = mlines._get_dash_pattern(ls)
# scale the dash pattern by the linewidth
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
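    # Illustrative dash-tuple usage (values are arbitrary):
    #
    #   >>> p.set_linestyle((0, (3, 1)))   # 3pt on, 1pt off, zero offset
    #   >>> p.set_linestyle('--')          # named style, same mechanism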
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
self.stale = True
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,) +
'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
self.stale = True
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,) +
'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
self.stale = True
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(0, self._dashes)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
self.stale = False
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
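        For example (a sketch assuming ``rect`` is an existing patch and
        ``ax`` an Axes instance; the offsets are in points)::
            shadow = Shadow(rect, 2, -2)
            ax.add_patch(shadow)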
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.to_rgba(self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
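        For example (a minimal sketch assuming an Axes instance ``ax``)::
            rect = Rectangle((0.1, 0.1), 0.5, 0.3, angle=30.0,
                             facecolor='blue', edgecolor='black')
            ax.add_patch(rect)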
"""
Patch.__init__(self, **kwargs)
self._x = float(xy[0])
self._y = float(xy[1])
self._width = float(width)
self._height = float(height)
self._angle = float(angle)
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
        Return the path of the rectangle (the unit rectangle; position
        and size are applied via the patch transform)
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
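        For example (a sketch assuming an Axes instance ``ax``)::
            pentagon = RegularPolygon((0.5, 0.5), 5, radius=0.2)
            ax.add_patch(pentagon)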
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
_edge_default = True
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
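        For example (a sketch assuming an Axes instance ``ax``)::
            verts = [(0, 0), (1, 0), (1, 1), (0, 0)]
            codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
            pp = PathPatch(Path(verts, codes), facecolor='green')
            ax.add_patch(pp)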
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
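        For example (a sketch assuming an Axes instance ``ax``)::
            tri = Polygon([(0, 0), (1, 0), (0.5, 1)], closed=True)
            ax.add_patch(tri)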
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
        Return whether the polygon is closed.
Returns
-------
closed : bool
            Whether the polygon is closed.
"""
return self._closed
def set_closed(self, closed):
"""
        Set whether the polygon is closed.
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
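        For example (a sketch assuming an Axes instance ``ax``)::
            # a filled wedge sweeping from 30 to 270 degrees
            ax.add_patch(Wedge((0.5, 0.5), 0.4, 30, 270))
            # a partial annulus of thickness 0.1
            ax.add_patch(Wedge((0.5, 0.5), 0.4, 30, 270, width=0.1))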
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
        Draws an arrow, starting at (*x*, *y*), with direction and length
        given by (*dx*, *dy*); the width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.hypot(dx, dy)
if L != 0:
cx = float(dx) / L
sx = float(dy) / L
else:
# Account for division by zero
cx, sx = 0, 1
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
_edge_default = True
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
*head_width*: float or None (default: 3*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
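        For example (a sketch assuming an Axes instance ``ax``)::
            arrow = FancyArrow(0, 0, 0.5, 0.5, width=0.02, head_width=0.1,
                               length_includes_head=True)
            ax.add_patch(arrow)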
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.hypot(dx, dy)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
# if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
# if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
# figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-2],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
if distance != 0:
cx = float(dx) / distance
sx = float(dy) / distance
else:
#Account for division by zero
cx, sx = 0, 1
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
        Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
        Create a true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
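        For example (a sketch assuming an Axes instance ``ax``)::
            circle = Circle((0.5, 0.5), 0.25, fill=False)
            ax.add_patch(circle)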
"""
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
self.radius = radius
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
self.stale = True
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
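        For example (a sketch assuming an Axes instance ``ax``; arcs
        cannot be filled)::
            arc = Arc((0.5, 0.5), 0.8, 0.4, angle=30.0,
                      theta1=0.0, theta2=120.0)
            ax.add_patch(arc)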
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
# self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
                self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
    This is a debug function to draw a rectangle around a given
    bounding box, to test whether an artist is returning the correct
    bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
if six.PY2:
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefs,
annotations) = inspect.getfullargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
# adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a string rep of the list of keys.
Used to update the documentation.
"""
styles = "[ \'"
styles += "\' | \'".join(str(i) for i in sorted(_styles.keys()))
styles += "\' ]"
return styles
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
        # a dictionary of {style name: style class} pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (that is, of the transformation of the rectangle into
    the fancy box). *aspect_ratio* determines the aspect ratio of
    the mutation.
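    For example (a sketch; boxstyles are usually consumed through
    :class:`FancyBboxPatch` rather than called directly)::
        bb = FancyBboxPatch((0.2, 0.2), 0.6, 0.3,
                            boxstyle=BoxStyle("Round", pad=0.1))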
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
            Initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the very core of the
            :class:`BboxTransmuter` class and must be overridden in the
            subclasses. It receives the location and size of the
            rectangle, and the mutation_size, with which the amount of
            padding etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class DArrow(_Base):
"""
(Double) Arrow Box
"""
# This source is copied from LArrow,
# modified to add a right arrow to the bbox.
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.DArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
# The width is padded by the arrows, so we don't need to pad it.
height = height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0)/2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), # bot-segment
(x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
(x1, y1 + dxx), # right-arrow
(x1, y1), (x0 + dxx, y1), # top-segment
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # left-arrow
(x0 + dxx, y0), (x0 + dxx, y0)] # close-poly
com = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list['darrow'] = DArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, height + 2. * pad
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # rounding size. Use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = (width + 2. * pad - 2 * dr,
height + 2. * pad - 2 * dr)
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
                size of the sawtooth. Half of *pad* if None.
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = (width + 2. * pad - tooth_size,
height + 2. * pad - tooth_size)
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(np.round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(np.round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
                           x1] * dsy_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
list(zip(right_saw_x, right_saw_y)) +
list(zip(top_saw_x, top_saw_y)) +
list(zip(left_saw_x, left_saw_y)) +
[(bottom_saw_x[0], bottom_saw_y[0])])
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
                size of the sawtooth. Half of *pad* if None.
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
    The :class:`FancyBboxPatch` class is similar to the
    :class:`Rectangle` class, but it draws a fancy box around the
    rectangle. The transformation of the rectangle box to the fancy
    box is delegated to the boxstyle classes defined in :class:`BoxStyle`.
"""
_edge_default = True
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with comma-separated
        attributes, or an instance of :class:`BoxStyle`. The following
        box styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.stale = True
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
        Old attrs are simply forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
The following boxstyles are available:
%(AvailableBoxstyles)s
ACCEPTS: %(ListBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
self.stale = True
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
    A connectionstyle object can be created either as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
An instance of any connection style class is an callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
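    For example (a sketch; connection styles are usually passed to
    :class:`FancyArrowPatch` by name)::
        arrow = FancyArrowPatch((0, 0), (1, 1),
                                connectionstyle="arc3,rad=0.3")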
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
            Clip the path to the boundaries of patchA and patchB.
            The starting point of the path needs to be inside patchA,
            and the end point inside patchB. The *contains* method of
            each patch object is used to test whether a point is
            inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by fixed size (in points) with shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is clipped and shrunken.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrunk_path
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance from C1 to the line
        connecting C0 and C2 is *rad* times the distance between C0 and C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The middle control point is placed at the intersection
        of two lines which cross the start and end points at angles of
        *angleA* and *angleB*, respectively.
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
        two points. The path has one passing-through point, placed at
        the intersection of two lines which cross the start and end
        points at angles of *angleA* and *angleB*, respectively. The
        connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
        two points. The path can have two passing-through points: one
        placed at distance *armA* and angle *angleA* from point A, and
        another placed correspondingly with respect to point B. The
        edges are rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
                # x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
A line with *angle* between A and B with *armA* and
        *armB*. One of the arms is extended so that they are connected at
        a right angle. The length of armA is determined by (*armA*
        + *fraction* x AB distance); likewise for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
Parameters
----------
armA : float
minimum length of armA
armB : float
minimum length of armB
fraction : float
a fraction of the distance between two points that
will be added to armA and armB.
angle : float or None
angle of the connecting line (if None, parallel
to A and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
class ArrowStyle(_Style):
"""
    :class:`ArrowStyle` is a container class which defines several
    arrowstyle classes, which are used to create an arrow path along a
    given path. These are mainly used with :class:`FancyArrowPatch`.
    An arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
        value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its arguments (except self) must have
        # default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
if ((len(segments) != 2) or (segments[0][1] != Path.MOVETO) or
(segments[1][1] != Path.CURVE3)):
msg = "'path' it's not a valid quadratic bezier curve"
raise ValueError(msg)
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
            class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head etc.
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
            boolean. The boolean value indicates whether the path can
            be filled or not. The return value can also be a list of paths
            and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
            and takes care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrunk = Path(vertices, codes)
# call transmute method with squeezed height.
                path_mutated, fillable = self.transmute(path_shrunk,
                                                        mutation_size,
                                                        linewidth)
if cbook.iterable(fillable):
path_list = []
                    for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
        A simple arrow which will work with any path instance. The
        returned path is a simple concatenation of the original path and,
        at most, two paths representing the arrow heads at the begin point
        and at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
            true. *head_length* and *head_width* determine the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
            drawn with capstyle=projected, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunken so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
            # pad_projected : amount of pad to account for the
            # overshoot of the projected edge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
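        # Geometry note for _get_arrow_wedge: head_dist is the hypotenuse of
        # (head_length, head_width), so (cos_t, sin_t) encode the
        # half-opening angle of the head.  The two barbs above are the shaft
        # direction rotated by +/- that angle, scaled to head_dist, and
        # shifted by (ddx, ddy) to compensate for the projected line cap.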
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = (self.beginarrow and
not ((x0 == x1) and (y0 == y1)))
if has_begin_arrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = (self.endarrow and not ((x2 == x3) and (y2 == y3)))
if has_end_arrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
            # this simple code will not work if ddx, ddy is greater than the
            # separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if has_begin_arrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if has_end_arrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
        An arrow with brackets (] and [) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
        An arrow with a bracket (]) at its begin point.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA,
lengthA=lengthA,
angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB,
lengthB=lengthB,
angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of
                # length head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of
                # length head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
            # path for the tail (head region clipped at 0.8 * head_length)
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
            # locate the tail start point near the begin point
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
        Wedge shape. Only works with a quadratic bezier curve. The
        begin point has a width of tail_width and the end point has a
        width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
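# Illustrative usage sketch: mutating a plain path with an arrow style.  The
# helper name `_demo_arrowstyle` is hypothetical; it relies on the module's
# Path import and uses display-space sizes as FancyArrowPatch would.
def _demo_arrowstyle():
    style = ArrowStyle("-|>", head_length=.4, head_width=.2)
    path = Path([(0., 0.), (100., 0.)], [Path.MOVETO, Path.LINETO])
    # Returns a list of Path instances and a matching list of booleans
    # saying which of them may be filled.
    return style(path, mutation_size=20., linewidth=1.)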
class FancyArrowPatch(Patch):
"""
    A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
_edge_default = True
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
        If *posA* and *posB* are given, a path connecting the two points
        is created according to the connectionstyle. The path will be
        clipped with *patchA* and *patchB* and further shrunken by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path* is
        provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.
        The *connectionstyle* describes how *posA* and *posB* are
        connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
        connectionstyle name, with optional comma-separated
        attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
        The *arrowstyle* describes how the fancy arrow will be
        drawn. It can be a string of the available arrowstyle names,
        with optional comma-separated attributes, or an ArrowStyle
        instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
            self._connector = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
def set_dpi_cor(self, dpi_cor):
"""
        dpi_cor is currently used for linewidth-related things and the
        shrink factor. The mutation scale is affected by this value.
"""
self._dpi_cor = dpi_cor
self.stale = True
def get_dpi_cor(self):
"""
        dpi_cor is currently used for linewidth-related things and the
        shrink factor. The mutation scale is affected by this value.
"""
return self._dpi_cor
def set_positions(self, posA, posB):
""" set the begin and end positions of the connecting
path. Use current value if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
self.stale = True
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
self.stale = True
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
        Old attrs are simply forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
self.stale = True
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
        Old attrs are simply forgotten.
Without argument (or with arrowstyle=None), return
        available arrow styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
self.stale = True
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
        Return the path of the arrow in data coordinates. Use the
        get_path_in_displaycoord() method to retrieve the arrow path
        in display coordinates.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
        Return the mutated path of the arrow in display coordinates.
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(self._dashoffset, self._dashes)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
        # FIXME : dpi_cor is for the dpi-dependency of the
# linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
self.stale = False
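# Illustrative usage sketch: a curved, filled arrow between two data points
# on an existing Axes.  The helper name `_demo_fancyarrowpatch` is
# hypothetical and exists only for illustration.
def _demo_fancyarrowpatch(ax):
    arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
                            connectionstyle="arc3,rad=0.3",
                            arrowstyle="-|>", mutation_scale=20)
    # mutation_scale sets the absolute size of the arrow head in points.
    ax.add_patch(arrow)
    return arrow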
class ConnectionPatch(FancyArrowPatch):
"""
    A :class:`~matplotlib.patches.ConnectionPatch` makes
    connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
        'figure fraction'   0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
        'axes fraction'     0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, x, y, s, axes=None):
"""
        Calculate the pixel position of the given point.
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
        Set the *annotation_clip* attribute.
        * True: the annotation will only be drawn when self.xy1 is
          inside the axes.
        * False: the annotation will always be drawn regardless of its
          position.
        * None: self.xy1 will be checked only if *coords1* is "data"
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
        Return the mutated path of the arrow in display coordinates.
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
        Check whether the annotation needs to be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
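# Illustrative usage sketch: connecting a point in one axes to a point in
# another.  The helper name `_demo_connectionpatch` is hypothetical.
def _demo_connectionpatch(ax1, ax2):
    con = ConnectionPatch(xyA=(0.5, 0.5), xyB=(0.5, 0.5),
                          coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, arrowstyle="->")
    # The artist lives in one axes but can reference coordinates of both.
    ax2.add_artist(con)
    return con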
| gpl-3.0 |
petosegan/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
    for prediction. In case cv="prefit" is passed to __init__,
    it is assumed that base_estimator has already been
    fitted and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
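# Illustrative usage sketch: cross-validated calibration of a margin
# classifier.  The helper name `_demo_calibrated_cv` is hypothetical and
# exists only for illustration.
def _demo_calibrated_cv():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, random_state=0)
    clf = CalibratedClassifierCV(LinearSVC(random_state=0),
                                 method='sigmoid', cv=3)
    clf.fit(X, y)
    # Probabilities are the average over the three per-fold calibrated
    # classifiers.
    return clf.predict_proba(X[:5])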
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
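# Illustrative usage sketch: the cv="prefit" path, where fitting and
# calibration use disjoint data as the class docstring requires.  The helper
# name `_demo_prefit_calibration` is hypothetical.
def _demo_prefit_calibration():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=300, random_state=1)
    base = LinearSVC(random_state=0).fit(X[:200], y[:200])
    calibrated = CalibratedClassifierCV(base, method='sigmoid', cv='prefit')
    calibrated.fit(X[200:], y[200:])  # calibration set only
    return calibrated.predict_proba(X[:3])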
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
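# Illustrative sketch of Platt scaling on toy scores.  The helper name
# `_demo_sigmoid_fit` is hypothetical and exists only for illustration.
def _demo_sigmoid_fit():
    df = np.array([-2., -1., -.5, .5, 1., 2.])
    y = np.array([0, 0, 0, 1, 1, 1])
    a, b = _sigmoid_calibration(df, y)
    # Platt's model is P(y=1|df) = 1 / (1 + exp(a * df + b)); for
    # well-ordered scores a is negative, so probability rises with df.
    return 1. / (1. + np.exp(a * df + b))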
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
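# Illustrative sketch of a tiny reliability-diagram computation.  The helper
# name `_demo_calibration_curve` is hypothetical.
def _demo_calibration_curve():
    y_true = np.array([0, 0, 0, 1, 1, 1])
    y_prob = np.array([0.1, 0.35, 0.4, 0.6, 0.8, 0.9])
    # With n_bins=2 the bins are [0, 0.5) and [0.5, 1]; the return values
    # are the fraction of positives and the mean predicted probability
    # per non-empty bin.
    return calibration_curve(y_true, y_prob, n_bins=2)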
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
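# Illustrative sketch of the trial-count formula
# N >= log(1 - probability) / log(1 - inlier_ratio ** min_samples).  The
# helper name `_demo_dynamic_max_trials` is hypothetical.
def _demo_dynamic_max_trials():
    # 80 inliers out of 100 samples, a 2-point model and 99% confidence:
    # ceil(log(0.01) / log(1 - 0.8 ** 2)) == 5 trials.
    return _dynamic_max_trials(80, 100, 2, 0.99)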
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
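# Note (added for clarity): this is the raw median absolute deviation
# of y about its median, without the 1.4826 consistency scaling factor.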
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
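# `_dynamic_max_trials` (defined elsewhere in this module) is expected
# to return ceil(log(1 - stop_probability) /
# log(1 - (n_inliers / n_samples) ** min_samples)), i.e. the number of
# trials needed to sample at least one outlier-free subset with the
# requested confidence.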
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find a valid consensus set, because either the"
" `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly chosen sub-samples. Consider relaxing the"
" constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/tsa/base/tests/test_base.py | 27 | 2106 | import numpy as np
from pandas import Series
from pandas import date_range
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
def test_pandas_nodates_index():
from statsmodels.datasets import sunspots
y = sunspots.load_pandas().data.SUNACTIVITY
npt.assert_raises(ValueError, TimeSeriesModel, y)
def test_predict_freq():
# test that predicted dates have the same frequency
x = np.arange(1,36.)
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
#npt.assert_(model.data.freq == "AS-APR")
npt.assert_(model.data.freq == "A-APR")
start = model._get_predict_start("2006-4-30")
end = model._get_predict_end("2016-4-30")
model._make_predict_dates()
predict_dates = model.data.predict_dates
#expected_dates = date_range("2006-12-31", "2016-12-31",
# freq="AS-APR")
expected_dates = date_range("2006-4-30", "2016-4-30", freq="A-APR")
assert_equal(predict_dates, expected_dates)
#ptesting.assert_series_equal(predict_dates, expected_dates)
def test_keyerror_start_date():
x = np.arange(1,36.)
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_raises(ValueError, model._get_predict_start, "1970-4-30")
def test_period_index():
# test 1285
from pandas import PeriodIndex, TimeSeries
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
x = np.arange(1, 21.)
model = TimeSeriesModel(Series(x, index=dates))
npt.assert_(model.data.freq == "M")
model = TimeSeriesModel(TimeSeries(x, index=dates))
npt.assert_(model.data.freq == "M")
| bsd-3-clause |
sunshinelover/chanlun | vn.trader/ctaAlgo/uiChanlunWidget.py | 1 | 68647 | # encoding: UTF-8
"""
GUI control components for the Chanlun (entanglement theory) module
"""
from vtGateway import VtSubscribeReq
from uiBasicWidget import QtGui, QtCore, BasicCell,BasicMonitor,TradingWidget
from eventEngine import *
from ctaBase import *
import pyqtgraph as pg
import numpy as np
import pymongo
from pymongo.errors import *
from datetime import datetime, timedelta
from ctaHistoryData import HistoryDataEngine
import time
import types
import pandas as pd
########################################################################
class MyStringAxis(pg.AxisItem):
def __init__(self, xdict, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
self.x_values = np.asarray(xdict.keys())
self.x_strings = xdict.values()
def tickStrings(self, values, scale, spacing):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
# if we have vs in our values, show the string
# otherwise show nothing
if vs in self.x_values:
# Find the string with x_values closest to vs
vstr = self.x_strings[np.abs(self.x_values - vs).argmin()]
else:
vstr = ""
strings.append(vstr)
return strings
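# Usage sketch (illustrative note, not part of the original file): this axis
# maps integer x positions back to their original string labels, e.g.
# xdict = dict(enumerate(['09:30', '09:31', '09:32']))
# axis = MyStringAxis(xdict, orientation='bottom')
# pw = pg.PlotWidget(axisItems={'bottom': axis})
# so bars plotted at x = 0, 1, 2 are labelled with their timestamps.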
########################################################################
class ChanlunEngineManager(QtGui.QWidget):
"""chanlun引擎管理组件"""
signal = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, chanlunEngine, eventEngine, mainEngine, parent=None):
"""Constructor"""
super(ChanlunEngineManager, self).__init__(parent)
self.chanlunEngine = chanlunEngine
self.eventEngine = eventEngine
self.mainEngine = mainEngine
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
self.instrumentid = ''
self.initUi()
self.registerEvent()
# log startup
self.chanlunEngine.writeChanlunLog(u'缠论引擎启动成功')
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'缠论策略')
# futures symbol input box
self.codeEdit = QtGui.QLineEdit()
self.codeEdit.setPlaceholderText(u'在此输入期货代码')
self.codeEdit.setMaximumWidth(200)
self.data = pd.DataFrame() # data needed for plotting (important)
self.fenX = [] # x coordinates used for pens and segments
self.fenY = [] # y coordinates used for pens and segments
self.zhongshuPos = [] # positions of the zhongshu (pivot ranges)
self.zhongShuType = [] # direction of each zhongshu
# price chart
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.TickW = None
# MongoDB-related state
self.__mongoConnected = False
self.__mongoConnection = None
# connect to the database
self.__connectMongo()
# buttons
penButton = QtGui.QPushButton(u'分笔')
segmentButton = QtGui.QPushButton(u'分段')
zhongshuButton = QtGui.QPushButton(u'走势中枢')
shopButton = QtGui.QPushButton(u'买卖点')
restoreButton = QtGui.QPushButton(u'还原')
penButton.clicked.connect(self.pen)
segmentButton.clicked.connect(self.segment)
zhongshuButton.clicked.connect(self.zhongShu)
shopButton.clicked.connect(self.shop)
restoreButton.clicked.connect(self.restore)
# log monitor for the Chanlun component
self.chanlunLogMonitor = QtGui.QTextEdit()
self.chanlunLogMonitor.setReadOnly(True)
self.chanlunLogMonitor.setMaximumHeight(180)
# lay out the widgets
self.hbox2 = QtGui.QHBoxLayout()
self.hbox2.addWidget(self.codeEdit)
self.hbox2.addWidget(penButton)
self.hbox2.addWidget(segmentButton)
self.hbox2.addWidget(zhongshuButton)
self.hbox2.addWidget(shopButton)
self.hbox2.addWidget(restoreButton)
self.hbox2.addStretch()
tickButton = QtGui.QPushButton(u'Tick')
oneMButton = QtGui.QPushButton(u"1分")
fiveMButton = QtGui.QPushButton(u'5分')
fifteenMButton = QtGui.QPushButton(u'15分')
thirtyMButton = QtGui.QPushButton(u'30分')
sixtyMButton = QtGui.QPushButton(u'60分')
dayButton = QtGui.QPushButton(u'日')
weekButton = QtGui.QPushButton(u'周')
monthButton = QtGui.QPushButton(u'月')
oneMButton.checked = True
self.vbox1 = QtGui.QVBoxLayout()
tickButton.clicked.connect(self.openTick)
oneMButton.clicked.connect(self.oneM)
fiveMButton.clicked.connect(self.fiveM)
fifteenMButton.clicked.connect(self.fifteenM)
thirtyMButton.clicked.connect(self.thirtyM)
sixtyMButton.clicked.connect(self.sixtyM)
dayButton.clicked.connect(self.daily)
weekButton.clicked.connect(self.weekly)
monthButton.clicked.connect(self.monthly)
self.vbox2 = QtGui.QVBoxLayout()
self.vbox1.addWidget(self.PriceW)
self.vbox2.addWidget(tickButton)
self.vbox2.addWidget(oneMButton)
self.vbox2.addWidget(fiveMButton)
self.vbox2.addWidget(fifteenMButton)
self.vbox2.addWidget(thirtyMButton)
self.vbox2.addWidget(sixtyMButton)
self.vbox2.addWidget(dayButton)
self.vbox2.addWidget(weekButton)
self.vbox2.addWidget(monthButton)
self.vbox2.addStretch()
self.hbox3 = QtGui.QHBoxLayout()
self.hbox3.addStretch()
self.hbox3.addLayout(self.vbox1)
self.hbox3.addLayout(self.vbox2)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addWidget(self.chanlunLogMonitor)
self.setLayout(self.vbox)
self.codeEdit.returnPressed.connect(self.updateSymbol)
#-----------------------------------------------------------------------
# download historical data from the DataYes (Tonglian) data service
def downloadData(self, symbol, unit):
listBar = [] # K-line (bar) data
num = 0
# fetch K-line data from the DataYes client
historyDataEngine = HistoryDataEngine()
# an int unit fetches minute bars; a string unit fetches daily/weekly/monthly bars
if type(unit) is types.IntType:
# download today's minute bars from DataYes and store them in the database
historyDataEngine.downloadFuturesIntradayBar(symbol, unit)
# fetch the previous days' minute bars from the database
cx = self.getDbData(symbol, unit)
if cx:
for data in cx:
barOpen = data['open']
barClose = data['close']
barLow = data['low']
barHigh = data['high']
barTime = data['datetime']
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
elif type(unit) is types.StringType:
data = historyDataEngine.downloadFuturesBar(symbol, unit)
if data:
for d in data:
barOpen = d.get('openPrice', 0)
barClose = d.get('closePrice', 0)
barLow = d.get('lowestPrice', 0)
barHigh = d.get('highestPrice', 0)
if unit == "daily":
barTime = d.get('tradeDate', '').replace('-', '')
else:
barTime = d.get('endDate', '').replace('-', '')
listBar.append((num, barTime, barOpen, barClose, barLow, barHigh))
num += 1
if unit == "monthly" or unit == "weekly":
listBar.reverse()
else:
print "参数格式错误"
return
# convert the list into a DataFrame for easier handling
df = pd.DataFrame(listBar, columns=['num', 'time', 'open', 'close', 'low', 'high'])
df.index = df['time'].tolist()
df = df.drop('time', 1)
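# Note (added for clarity): the resulting positional columns are
# 0=num, 1=open, 2=close, 3=low, 4=high; judgeInclude() below relies
# on these positions via DataFrame.ix.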
return df
#-----------------------------------------------------------------------
# fetch the last couple of days' minute bars from the database
def getDbData(self, symbol, unit):
# no trading on Saturday/Sunday, so there is no minute data for those days
# choose the database name
dbname = ''
days = 7
if unit == 1:
dbname = MINUTE_DB_NAME
elif unit == 5:
dbname = MINUTE5_DB_NAME
elif unit == 15:
dbname = MINUTE15_DB_NAME
elif unit == 30:
dbname = MINUTE30_DB_NAME
elif unit == 60:
dbname = MINUTE60_DB_NAME
weekday = datetime.now().weekday() # weekday() returns 0-6 for Monday through Sunday
if days == 2:
if weekday == 6:
aDay = timedelta(days=3)
elif weekday == 0 or weekday == 1:
aDay = timedelta(days=4)
else:
aDay = timedelta(days=2)
else:
aDay = timedelta(days=7)
startDate = (datetime.now() - aDay).strftime('%Y%m%d')
print startDate
if self.__mongoConnected:
collection = self.__mongoConnection[dbname][symbol]
cx = collection.find({'date': {'$gte': startDate}})
return cx
else:
return None
#----------------------------------------------------------------------------------
#"""合约变化"""
def updateSymbol(self):
# 读取组件数据
instrumentid = str(self.codeEdit.text())
self.chanlunEngine.writeChanlunLog(u'查询合约%s' % (instrumentid))
# fetch today's minute bars from the DataYes client
self.data = self.downloadData(instrumentid, 1)
if self.data.empty:
self.chanlunEngine.writeChanlunLog(u'合约%s 不存在' % (instrumentid))
else:
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (instrumentid))
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
self.zhongShuLoaded = False
# # subscribe to the contract (modelled on ctaEngine.py)
# # unsubscribe from the previous contract first, then subscribe to the newly entered one
# contract = self.mainEngine.getContract(self.instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.unsubscribe(req, contract.gatewayName)
#
# contract = self.mainEngine.getContract(instrumentid)
# if contract:
# req = VtSubscribeReq()
# req.symbol = contract.symbol
# self.mainEngine.subscribe(req, contract.gatewayName)
# else:
# self.chanlunEngine.writeChanlunLog(u'交易合约%s无法找到' % (instrumentid))
#
# # re-register the event listener
# self.eventEngine.unregister(EVENT_TICK + self.instrumentid, self.signal.emit)
# self.eventEngine.register(EVENT_TICK + instrumentid, self.signal.emit)
# update the current contract
self.instrumentid = instrumentid
def oneM(self):
"打开1分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 1)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fiveM(self):
"打开5分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 5分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 5)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def fifteenM(self):
"打开15分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 15分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 15)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def thirtyM(self):
"打开30分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 30分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 30)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def sixtyM(self):
"打开60分钟K线图"
self.chanlunEngine.writeChanlunLog(u'打开合约%s 60分钟K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, 60)
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def daily(self):
"""打开日K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 日K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "daily")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def weekly(self):
"""打开周K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 周K线图' % (self.instrumentid))
# 从通联数据客户端获取数据
self.data = self.downloadData(self.instrumentid, "weekly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
def monthly(self):
"""打开月K线图"""
self.chanlunEngine.writeChanlunLog(u'打开合约%s 月K线图' % (self.instrumentid))
# 从通联数据客户端获取数据并画图
self.data = self.downloadData(self.instrumentid, "monthly")
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.tickLoaded = False
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def openTick(self):
"""切换成tick图"""
self.chanlunEngine.writeChanlunLog(u'打开tick图')
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.TickW = TickWidget(self.eventEngine, self.chanlunEngine)
self.vbox1.addWidget(self.TickW)
self.tickLoaded = True
self.penLoaded = False
self.segmentLoaded = False
self.zhongShuLoaded = False
# ----------------------------------------------------------------------
def restore(self):
"""还原初始k线状态"""
self.chanlunEngine.writeChanlunLog(u'还原加载成功')
if self.tickLoaded:
self.vbox1.removeWidget(self.TickW)
self.TickW.deleteLater()
else:
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.data = self.downloadData(self.instrumentid, 1)
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data, self)
self.vbox1.addWidget(self.PriceW)
# draw the K-line chart
self.PriceW.plotHistorticData()
self.chanlunEngine.writeChanlunLog(u'还原为1分钟k线图')
self.penLoaded = False
self.segmentLoaded = False
self.tickLoaded = False
# ----------------------------------------------------------------------
def pen(self):
"""加载分笔"""
# 先合并K线数据,记录新建PriceW之前合并K线的数据
if not self.penLoaded:
after_fenxing = self.judgeInclude() # resolve containment relations among the bars in self.data
# clear the canvas: remove the existing widget before creating a new one
self.vbox1.removeWidget(self.PriceW)
self.PriceW.deleteLater()
self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, after_fenxing)
self.vbox1.addWidget(self.PriceW)
# redraw the K-line chart using the merged bars
self.plotAfterFenXing(after_fenxing)
# find the tops and bottoms (fractals)
fenxing_data, fenxing_type = self.findTopAndLow(after_fenxing)
arrayFenxingdata = np.array(fenxing_data)
arrayTypedata = np.array(fenxing_type)
self.fenY = []
self.fenX = [m[0] for m in arrayFenxingdata]
fenbiY1 = [m[4] for m in arrayFenxingdata] # a top fractal is marked by its high
fenbiY2 = [m[3] for m in arrayFenxingdata] # a bottom fractal is marked by its low
for i in xrange(len(self.fenX)):
if arrayTypedata[i] == 1:
self.fenY.append(fenbiY1[i])
else:
self.fenY.append(fenbiY2[i])
if not self.penLoaded:
if self.fenX:
self.fenX.append(self.fenX[-1])
self.fenY.append(self.fenY[-1])
print "self.fenX: ", self.fenX
print "self.fenY: ", self.fenY
self.fenbi(self.fenX, self.fenY)
self.fenX.pop()
self.fenY.pop()
self.chanlunEngine.writeChanlunLog(u'分笔加载成功')
self.penLoaded = True
# ----------------------------------------------------------------------
def segment(self):
if not self.penLoaded:
self.pen() # pens must be loaded before segments can be drawn
segmentX = [] # x values of segment endpoints
segmentY = [] # y values of segment endpoints
temp_type = 0 # segment direction: 1 up, -1 down, 0 until the first three pens are checked for overlap
i = 0
while i < len(self.fenX) - 4:
if temp_type == 0:
if self.fenY[i] > self.fenY[i+1] and self.fenY[i] > self.fenY[i+3]:
temp_type = -1 # downward segment, three overlapping pens
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
elif self.fenY[i] < self.fenY[i+1] and self.fenY[i] < self.fenY[i+3]:
temp_type = 1 # upward segment, three overlapping pens
segmentX.append(self.fenX[i])
segmentY.append(self.fenY[i])
else:
temp_type = 0
i += 1
continue
if temp_type == 1: # upward segment
j = i+1
high = [] # record tops
low = [] # record bottoms
while j < len(self.fenX) - 1: # collect tops and bottoms
high.append(self.fenY[j])
low.append(self.fenY[j+1])
j += 2
if self.fenY[i+4] < self.fenY[i+1]: # upward segment broken by a downward pen
j = 0
while j < len(high)-2:
# a top fractal among the recorded tops ends the upward segment
if high[j+1] > high[j] and high[j+1] > high[j+2]:
num = i + 2 * j + 3 # index where the segment ends
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 # an upward segment must be ended by a downward one
break
j += 1
if j == len(high)-2:
break
else: # upward segment not broken by a downward pen
j = 1
while j < len(high)-2:
# a bottom fractal among the recorded bottoms ends the upward segment
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 1 # index where the segment ends
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = -1 # an upward segment must be ended by a downward one
break
j += 1
if j == len(high)-2:
break
elif temp_type == -1: # downward segment
j = i + 1
high = [] # record tops
low = [] # record bottoms
while j < len(self.fenX) - 1: # collect tops and bottoms
high.append(self.fenY[j + 1])
low.append(self.fenY[j])
j += 2
if self.fenY[i + 4] > self.fenY[i + 1]: # downward segment broken by an upward pen
j = 0
while j < len(high) - 2:
# a bottom fractal among the recorded bottoms ends the downward segment
if low[j + 1] < low[j] and low[j + 1] < low[j + 2]:
num = i + 2 * j + 3 # index where the segment ends
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # a downward segment must be ended by an upward one
break
j += 1
if j == len(high) - 2:
break
else: # downward segment not broken by an upward pen
j = 1
while j < len(high) - 2:
# a top fractal among the recorded tops ends the downward segment
if high[j + 1] > high[j] and high[j + 1] > high[j + 2]:
num = i + 2 * j + 1 # index where the segment ends
segmentX.append(self.fenX[num])
segmentY.append(self.fenY[num])
i = num
temp_type = 1 # a downward segment must be ended by an upward one
break
j += 1
if j == len(high) - 2:
break
print "segmentX: ", segmentX
print "segmentY: ", segmentY
if not self.segmentLoaded:
if len(segmentX) > 1:
segmentX.append(segmentX[-1])
segmentY.append(segmentY[-1])
segmentX = [int(x) for x in segmentX]
segmentY = [int(y) for y in segmentY]
self.fenduan(segmentX, segmentY)
self.chanlunEngine.writeChanlunLog(u'分段加载成功')
self.segmentLoaded = True
# ----------------------------------------------------------------------
def updateChanlunLog(self, event):
"""更新缠论相关日志"""
log = event.dict_['data']
# print type(log)
if(log.logTime):
content = '\t'.join([log.logTime, log.logContent])
self.chanlunLogMonitor.append(content)
else:
print 0
#-----------------------------------------------------------------------
def zhongShu(self):
if not self.penLoaded:
self.pen() # pens must be loaded before the zhongshu can be drawn
# temp_type = 0 # zhongshu direction: 1 up, -1 down
i = 0
temp_high, temp_low = 0, 0
minX, maxX = 0, 0 # fix: initialise maxX (the original initialised maxY), since maxX is read below before any assignment
self.zhongshuPos = [] # start and end segment positions of every zhongshu
self.zhongShuType = [] # direction of every zhongshu
while i < len(self.fenX) - 4:
if (self.fenY[i] > self.fenY[i + 1] and self.fenY[i + 1] < self.fenY[i + 4]): # direction of the entering segment
temp_low = max(self.fenY[i + 1], self.fenY[i + 3])
temp_high = min(self.fenY[i + 2], self.fenY[i + 4]) # track the lowest top and the highest bottom inside the zhongshu
minX = self.fenX[i+1]
self.zhongshuPos.append(i)
self.zhongShuType.append(-1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high :
maxX = self.fenX[i+4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low :
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
if j == i:
break
elif (self.fenY[i] < self.fenY[i + 1] and self.fenY[i + 1] > self.fenY[i + 4]):
temp_high = min(self.fenY[i + 1], self.fenY[i + 3])
temp_low = max(self.fenY[i + 2], self.fenY[i + 4])
minX = self.fenX[i + 1]
self.zhongshuPos.append(i)
self.zhongShuType.append(1)
j = i
while i < len(self.fenX) - 4:
j = i
if self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] < temp_high:
temp_high = self.fenY[i + 3]
if self.fenY[i + 4] > temp_low:
temp_low = self.fenY[i + 4]
i = i + 1
elif self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high:
maxX = self.fenX[i + 4]
if self.fenY[i + 3] > temp_low:
temp_low = self.fenY[i + 3]
if self.fenY[i + 4] < temp_high:
temp_high = self.fenY[i + 4]
i = i + 1
if i == j:
break
else:
i += 1
continue
# draw the zhongshu identified so far
if minX != 0 and maxX == 0:
maxX = self.fenX[i+4]
i = i + 1
self.zhongshuPos.append(i + 4)
else:
self.zhongshuPos.append(i + 3)
minY, maxY = temp_low, temp_high
print minX, minY, maxX, maxY
if int(maxY) > int(minY):
plotX = [minX, minX, maxX, maxX, minX]
plotY = [minY, maxY, maxY, minY, minY]
plotX = [int(x) for x in plotX]
plotY = [int(y) for y in plotY]
self.zhongshu(plotX, plotY)
i = i + 4
self.zhongShuLoaded = True
self.chanlunEngine.writeChanlunLog(u'走势中枢加载成功')
# ----------------------------------------------------------------------
def shop(self):
"""加载买卖点"""
if not self.zhongShuLoaded:
self.zhongShu()
i = 0
while i < len(self.zhongShuType) - 1:
startPos, endPos = self.zhongshuPos[2*i], self.zhongshuPos[2*i + 1] # positions of the zhongshu's entering and leaving segments
startY = self.fenY[startPos + 1] - self.fenY[startPos] # y extent of the entering segment
startX = self.fenX[startPos + 1] - self.fenX[startPos] # x extent of the entering segment
startK = abs(startY * startX) # projected area of the entering segment
endY = self.fenY[endPos + 1] - self.fenY[endPos] # y extent of the leaving segment
endX = self.fenX[endPos + 1] - self.fenX[endPos] # x extent of the leaving segment
endK = abs(endY * endX) # projected area of the leaving segment (the original comment mistakenly said "entering")
if endK < startK:
print startPos, endPos
if self.zhongShuType[i] == 1 and self.zhongShuType[i + 1] == -1:
# first sell point
self.sellpoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# second sell point: the top right after the first sell
self.sellpoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# third sell point: the first top after the leaving segment of the next zhongshu
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # end position of the next zhongshu
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] > self.fenY[nextPos]:
self.sellpoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.sellpoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
elif self.zhongShuType[i] == -1 and self.zhongShuType[i + 1] == 1:
# first buy point
self.buypoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1)
# second buy point: the bottom right after the first buy
self.buypoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2)
# third buy point: the first bottom after the leaving segment of the next zhongshu
i = i + 1
nextPos = self.zhongshuPos[2*i + 1] # end position of the next zhongshu
if nextPos + 1 < len(self.fenY):
if self.fenY[nextPos + 1] < self.fenY[nextPos]:
self.buypoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3)
else:
self.buypoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3)
i = i + 1 # go on to check whether later zhongshu show divergence (beichi)
self.chanlunEngine.writeChanlunLog(u'买卖点加载成功')
# ----------------------------------------------------------------------
def fenbi(self, fenbix, fenbiy):
self.PriceW.pw2.plotItem.plot(x=fenbix, y=fenbiy, pen=QtGui.QPen(QtGui.QColor(255, 236, 139)))
def fenduan(self, fenduanx, fenduany):
self.PriceW.pw2.plot(x=fenduanx, y=fenduany, symbol='o', pen=QtGui.QPen(QtGui.QColor(131, 111, 255)))
def zhongshu(self, zhongshux, zhongshuy):
self.PriceW.pw2.plot(x=zhongshux, y=zhongshuy, pen=QtGui.QPen(QtGui.QColor(255,165,0)))
def buypoint(self, buyx, buyy, point):
if point == 1:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(255,0,0), symbolPen=(255,0,0), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(238,130,238), symbolPen=(238,130,238),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(138,43,226), symbolPen=(138,43,226),symbol='star')
def sellpoint(self, sellx, selly, point):
if point == 1:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(119,172,48), symbolPen=(119,172,48), symbol='star')
elif point == 2:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(221,221,34), symbolPen=(221,221,34),symbol='star')
elif point == 3:
self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(179,158,77), symbolPen=(179,158,77),symbol='star')
# ----------------------------------------------------------------------
# resolve containment between adjacent K-lines and merge them
def judgeInclude(self):
## containment check
k_data = self.data
# DataFrame holding the merged ("after fenxing") bars
after_fenxing = pd.DataFrame()
temp_data = k_data[:1]
zoushi = [3] # trend markers: 3 flat, 4 down, 5 up
for i in xrange(len(k_data)):
case1 = temp_data.high[-1] >= k_data.high[i] and temp_data.low[-1] <= k_data.low[i] # bar 1 contains bar 2
case2 = temp_data.high[-1] <= k_data.high[i] and temp_data.low[-1] >= k_data.low[i] # bar 2 contains bar 1
case3 = temp_data.high[-1] == k_data.high[i] and temp_data.low[-1] == k_data.low[i] # the bars are equal
case4 = temp_data.high[-1] > k_data.high[i] and temp_data.low[-1] > k_data.low[i] # downtrend
case5 = temp_data.high[-1] < k_data.high[i] and temp_data.low[-1] < k_data.low[i] # uptrend
if case3:
zoushi.append(3)
continue
elif case1:
print temp_data
if zoushi[-1] == 4:
temp_data.ix[0, 4] = k_data.high[i] # going down: keep the lower high
else:
temp_data.ix[0, 3] = k_data.low[i] # going up: keep the higher low
elif case2:
temp_temp = temp_data[-1:]
temp_data = k_data[i:i + 1]
if zoushi[-1] == 4:
temp_data.ix[0, 4] = temp_temp.high[0]
else:
temp_data.ix[0, 3] = temp_temp.low[0]
elif case4:
zoushi.append(4)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
elif case5:
zoushi.append(5)
after_fenxing = pd.concat([after_fenxing, temp_data], axis=0)
temp_data = k_data[i:i + 1]
return after_fenxing
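# Worked example (added for clarity): two bars "contain" each other when one
# bar's high-low range covers the other's. Going up (zoushi == 5) the merge
# keeps max(high)/max(low); going down (zoushi == 4) it keeps
# min(high)/min(low). E.g. bars (high=12, low=8) and (high=11, low=9)
# merge to (high=12, low=9) in an uptrend.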
# ----------------------------------------------------------------------
# draw the merged K-line chart used for pen analysis
def plotAfterFenXing(self, after_fenxing):
# plot the bars that were merged by the containment check
for i in xrange(len(after_fenxing)):
# rewrite open/close from high/low so merged bars are drawn without shadows (wicks)
after_fenxing.iloc[i, 0] = i
if after_fenxing.open[i] > after_fenxing.close[i]:
after_fenxing.iloc[i, 1] = after_fenxing.high[i]
after_fenxing.iloc[i, 2] = after_fenxing.low[i]
else:
after_fenxing.iloc[i, 1] = after_fenxing.low[i]
after_fenxing.iloc[i, 2] = after_fenxing.high[i]
self.PriceW.onBarAfterFenXing(i, after_fenxing.index[i], after_fenxing.open[i], after_fenxing.close[i], after_fenxing.low[i], after_fenxing.high[i])
self.PriceW.plotKlineAfterFenXing()
print "plotKLine after fenxing"
# ----------------------------------------------------------------------
# find the tops and bottoms (fractal points)
def findTopAndLow(self, after_fenxing):
temp_num = 0 # index of the previous top or bottom
temp_high = 0 # high of the previous top
temp_low = 0 # low of the previous bottom
temp_type = 0 # type of the previously recorded point
i = 1
fenxing_type = [] # fractal types: 1 for a top fractal, -1 for a bottom fractal
fenxing_data = pd.DataFrame() # DataFrame rows of the fractal points
while (i < len(after_fenxing) - 1):
case1 = after_fenxing.high[i - 1] < after_fenxing.high[i] and after_fenxing.high[i] > after_fenxing.high[i + 1] # top fractal
case2 = after_fenxing.low[i - 1] > after_fenxing.low[i] and after_fenxing.low[i] < after_fenxing.low[i + 1] # bottom fractal
if case1:
if temp_type == 1: # previous point was a top: keep whichever top is higher
if after_fenxing.high[i] <= temp_high:
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif temp_type == 2: # previous point was a bottom: record it, then compare this fractal with later ones, keeping the more extreme same-direction fractal
if temp_low >= after_fenxing.high[i]: # if the previous bottom is above this top, skip the current top fractal
i += 1
elif i < temp_num + 4: # tops and bottoms must be at least 5 bars apart
i += 1
else:
fenxing_type.append(-1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
else:
temp_high = after_fenxing.high[i]
temp_num = i
temp_type = 1
i += 1
elif case2:
if temp_type == 2: # previous point was a bottom: keep whichever bottom is lower
if after_fenxing.low[i] >= temp_low:
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
elif temp_type == 1: # previous point was a top: record it, then compare this fractal with later ones, keeping the more extreme same-direction fractal
if temp_high <= after_fenxing.low[i]: # if the previous top is below this bottom, skip the current bottom fractal
i += 1
elif i < temp_num + 4: # tops and bottoms must be at least 5 bars apart
i += 1
else:
fenxing_type.append(1)
fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
temp_low = after_fenxing.low[i]
temp_num = i
temp_type = 2
i += 1
else:
i += 1
# if fenxing_type:
# if fenxing_type[-1] == 1 and temp_type == 2:
# fenxing_type.append(-1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
#
# if fenxing_type[-1] == -1 and temp_type == 1:
# fenxing_type.append(1)
# fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0)
return fenxing_data, fenxing_type
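# Note (added for clarity): a top fractal is a bar whose high exceeds both
# neighbours' highs; a bottom fractal has a low below both neighbours' lows.
# The `i < temp_num + 4` guard enforces the Chanlun rule that adjacent
# fractals of opposite type must span at least 5 merged bars.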
# ----------------------------------------------------------------------
# connect to the MongoDB database
def __connectMongo(self):
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
except ConnectionFailure:
pass
# ----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.signal.connect(self.updateChanlunLog)
self.eventEngine.register(EVENT_CHANLUN_LOG, self.signal.emit)
########################################################################
class PriceWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
symbol = ''
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (n, t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(n, min), QtCore.QPointF(n, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(n-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, data, parent=None):
"""Constructor"""
super(PriceWidget, self).__init__(parent)
# parameters and state for the K-line EMA curves
self.EMAFastAlpha = 0.0167 # fast EMA parameter (~60 bars)
self.EMASlowAlpha = 0.0083 # slow EMA parameter (~120 bars)
self.fastEMA = 0 # current fast EMA value
self.slowEMA = 0 # current slow EMA value
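# Note (added for clarity): alpha is chosen as roughly 1/N
# (0.0167 ~= 1/60, 0.0083 ~= 1/120), giving the recurrence
# EMA_t = alpha * close_t + (1 - alpha) * EMA_{t-1} used in onBar() below.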
self.listfastEMA = []
self.listslowEMA = []
# lists holding the K-line data
self.listBar = []
self.listClose = []
self.listHigh = []
self.listLow = []
self.listOpen = []
# whether historical data has finished loading
self.initCompleted = False
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
self.data = data # data needed for plotting
# MongoDB-related state
self.__mongoConnected = False
self.__mongoConnection = None
# connect to the database
self.__connectMongo()
self.initUi()
# self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Price')
self.vbl_1 = QtGui.QHBoxLayout()
self.initplotKline() # plotKline初始化
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotKline(self):
"""Kline"""
s = self.data.index # x-axis values
print "numbers of KLine: ", len(s)
xdict = dict(enumerate(s))
self.__axisTime = MyStringAxis(xdict, orientation='bottom')
self.pw2 = pg.PlotWidget(axisItems={'bottom': self.__axisTime}) # K-line chart
pw2x = self.pw2.getAxis('bottom')
pw2x.setGrid(150) # default x-axis grid
pw2y = self.pw2.getAxis('left')
pw2y.setGrid(150) # default y-axis grid
self.vbl_1.addWidget(self.pw2)
self.pw2.setMinimumWidth(1500)
self.pw2.setMaximumWidth(1800)
self.pw2.setDownsampling(mode='peak')
self.pw2.setClipToView(True)
self.curve5 = self.pw2.plot()
self.curve6 = self.pw2.plot()
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
## Draw an arrowhead next to the text box
# self.arrow = pg.ArrowItem()
# self.pw2.addItem(self.arrow)
# read one-minute data from the database and draw the minute chart
def plotMin(self, symbol):
self.initCompleted = True
cx = self.__mongoMinDB[symbol].find()
print cx.count()
if cx:
for data in cx:
self.barOpen = data['open']
self.barClose = data['close']
self.barLow = data['low']
self.barHigh = data['high']
self.barOpenInterest = data['openInterest']
# print self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest
self.onBar(self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest)
self.num += 1
# draw the historical K-line chart
def plotHistorticData(self):
self.initCompleted = True
for i in xrange(len(self.data)):
self.onBar(i, self.data.index[i], self.data.open[i], self.data.close[i], self.data.low[i], self.data.high[i])
self.plotKline()
print "plotKLine success"
#----------------------------------------------------------------------
def initHistoricalData(self):
"""初始历史数据"""
if self.symbol!='':
print "download histrical data:",self.symbol
self.initCompleted = True # 读取历史数据完成
td = timedelta(days=1) # 读取3天的历史TICK数据
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
print cx.count()
if cx:
for data in cx:
tick = Tick(data['symbol'])
tick.openPrice = data['lastPrice']
tick.highPrice = data['upperLimit']
tick.lowPrice = data['lowerLimit']
tick.lastPrice = data['lastPrice']
tick.volume = data['volume']
tick.openInterest = data['openInterest']
tick.upperLimit = data['upperLimit']
tick.lowerLimit = data['lowerLimit']
tick.time = data['time']
# tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['bidPrice1']
tick.bidPrice2 = data['bidPrice2']
tick.bidPrice3 = data['bidPrice3']
tick.bidPrice4 = data['bidPrice4']
tick.bidPrice5 = data['bidPrice5']
tick.askPrice1 = data['askPrice1']
tick.askPrice2 = data['askPrice2']
tick.askPrice3 = data['askPrice3']
tick.askPrice4 = data['askPrice4']
tick.askPrice5 = data['askPrice5']
tick.bidVolume1 = data['bidVolume1']
tick.bidVolume2 = data['bidVolume2']
tick.bidVolume3 = data['bidVolume3']
tick.bidVolume4 = data['bidVolume4']
tick.bidVolume5 = data['bidVolume5']
tick.askVolume1 = data['askVolume1']
tick.askVolume2 = data['askVolume2']
tick.askVolume3 = data['askVolume3']
tick.askVolume4 = data['askVolume4']
tick.askVolume5 = data['askVolume5']
self.onTick(tick)
print('load historic data completed')
#----------------------------------------------------------------------
def plotKline(self):
"""K线图"""
if self.initCompleted:
# 均线
self.curve5.setData(self.listfastEMA, pen=(255, 0, 0), name="Red curve")
self.curve6.setData(self.listslowEMA, pen=(0, 255, 0), name="Green curve")
# draw the candles
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def plotText(self):
lenClose = len(self.listClose)
if lenClose >= 5: # Fractal Signal
if self.listClose[-1] > self.listClose[-2] and self.listClose[-3] > self.listClose[-2] and self.listClose[-4] > self.listClose[-2] and self.listClose[-5] > self.listClose[-2] and self.listfastEMA[-1] > self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listLow[-1]), angle=90, brush=(255, 0, 0)) # red
self.pw2.addItem(self.arrow)
elif self.listClose[-1] < self.listClose[-2] and self.listClose[-3] < self.listClose[-2] and self.listClose[-4] < self.listClose[-2] and self.listClose[-5] < self.listClose[-2] and self.listfastEMA[-1] < self.listslowEMA[-1]:
## Draw an arrowhead next to the text box
# self.pw2.removeItem(self.arrow)
self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listHigh[-1]), angle=-90, brush=(0, 255, 0)) # green
self.pw2.addItem(self.arrow)
#----------------------------------------------------------------------
def onBar(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
self.listOpen.append(o)
self.listClose.append(c)
self.listHigh.append(h)
self.listLow.append(l)
# update the K-line EMA curves
if self.fastEMA:
self.fastEMA = c*self.EMAFastAlpha + self.fastEMA*(1-self.EMAFastAlpha)
self.slowEMA = c*self.EMASlowAlpha + self.slowEMA*(1-self.EMASlowAlpha)
else:
self.fastEMA = c
self.slowEMA = c
self.listfastEMA.append(self.fastEMA)
self.listslowEMA.append(self.slowEMA)
self.plotText() # mark entry positions
# ----------------------------------------------------------------------
# append a merged K-line bar
def onBarAfterFenXing(self, n, t, o, c, l, h):
self.listBar.append((n, t, o, c, l, h))
def plotKlineAfterFenXing(self):
# draw the candles
self.pw2.removeItem(self.candle)
self.candle = self.CandlestickItem(self.listBar)
self.pw2.addItem(self.candle)
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoMinDB = self.__mongoConnection['VnTrader_1Min_Db']
except ConnectionFailure:
pass
########################################################################
class TickWidget(QtGui.QWidget):
"""用于显示价格走势图"""
signal = QtCore.pyqtSignal(type(Event()))
# tick图的相关参数、变量
listlastPrice = np.empty(1000)
fastMA = 0
midMA = 0
slowMA = 0
listfastMA = np.empty(1000)
listmidMA = np.empty(1000)
listslowMA = np.empty(1000)
tickFastAlpha = 0.0333 # fast MA parameter (~30 ticks)
tickMidAlpha = 0.0167 # mid MA parameter (~60 ticks)
tickSlowAlpha = 0.0083 # slow MA parameter (~120 ticks)
ptr = 0
ticktime = None # time of the current tick
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2
a = pg.AxisItem('bottom', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True)
a.setFixedWidth(1)
a.setWidth(1)
a.setLabel(show=True)
a.setGrid(grid=True)
labelStyle = {'color': '#FFF', 'font-size': '14pt'}
a.setLabel('label text', units='V', **labelStyle)
# w = (self.data[1][0] - self.data[0][0]) / 3.
w = 0.2
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('g'))
else:
p.setBrush(pg.mkBrush('r'))
p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open))
pg.setConfigOption('leftButtonPan', False)
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
#----------------------------------------------------------------------
def __init__(self, eventEngine, chanlunEngine, parent=None):
"""Constructor"""
super(TickWidget, self).__init__(parent)
self.__eventEngine = eventEngine
self.__chanlunEngine = chanlunEngine
# MongoDB-related state
self.__mongoConnected = False
self.__mongoConnection = None
self.__mongoTickDB = None
# initialize
self.initUi()
self.registerEvent()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'Tick')
self.vbl_1 = QtGui.QHBoxLayout()
self.initplotTick() # plotTick初始化
self.setLayout(self.vbl_1)
#----------------------------------------------------------------------
def initplotTick(self):
""""""
self.pw1 = pg.PlotWidget(name='Plot1')
self.vbl_1.addWidget(self.pw1)
self.pw1.setMinimumWidth(1500)
self.pw1.setMaximumWidth(1800)
self.pw1.setRange(xRange=[-360, 0])
self.pw1.setLimits(xMax=5)
self.pw1.setDownsampling(mode='peak')
self.pw1.setClipToView(True)
self.curve1 = self.pw1.plot()
self.curve2 = self.pw1.plot()
self.curve3 = self.pw1.plot()
self.curve4 = self.pw1.plot()
# #----------------------------------------------------------------------
# def initHistoricalData(self,startDate=None):
# """初始历史数据"""
# print "download histrical data"
# self.initCompleted = True # 读取历史数据完成
# td = timedelta(days=1) # 读取3天的历史TICK数据
#
# if startDate:
# cx = self.loadTick(self.symbol, startDate-td)
# else:
# today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
# cx = self.loadTick(self.symbol, today-td)
#
# print cx.count()
#
# if cx:
# for data in cx:
# tick = Tick(data['symbol'])
#
# tick.openPrice = data['lastPrice']
# tick.highPrice = data['upperLimit']
# tick.lowPrice = data['lowerLimit']
# tick.lastPrice = data['lastPrice']
#
# tick.volume = data['volume']
# tick.openInterest = data['openInterest']
#
# tick.upperLimit = data['upperLimit']
# tick.lowerLimit = data['lowerLimit']
#
# tick.time = data['time']
# # tick.ms = data['UpdateMillisec']
#
# tick.bidPrice1 = data['bidPrice1']
# tick.bidPrice2 = data['bidPrice2']
# tick.bidPrice3 = data['bidPrice3']
# tick.bidPrice4 = data['bidPrice4']
# tick.bidPrice5 = data['bidPrice5']
#
# tick.askPrice1 = data['askPrice1']
# tick.askPrice2 = data['askPrice2']
# tick.askPrice3 = data['askPrice3']
# tick.askPrice4 = data['askPrice4']
# tick.askPrice5 = data['askPrice5']
#
# tick.bidVolume1 = data['bidVolume1']
# tick.bidVolume2 = data['bidVolume2']
# tick.bidVolume3 = data['bidVolume3']
# tick.bidVolume4 = data['bidVolume4']
# tick.bidVolume5 = data['bidVolume5']
#
# tick.askVolume1 = data['askVolume1']
# tick.askVolume2 = data['askVolume2']
# tick.askVolume3 = data['askVolume3']
# tick.askVolume4 = data['askVolume4']
# tick.askVolume5 = data['askVolume5']
#
# self.onTick(tick)
#
# print('load historic data completed')
#----------------------------------------------------------------------
def plotTick(self):
"""画tick图"""
self.curve1.setData(self.listlastPrice[:self.ptr])
self.curve2.setData(self.listfastMA[:self.ptr], pen=(255, 0, 0), name="Red curve")
self.curve3.setData(self.listmidMA[:self.ptr], pen=(0, 255, 0), name="Green curve")
self.curve4.setData(self.listslowMA[:self.ptr], pen=(0, 0, 255), name="Blue curve")
self.curve1.setPos(-self.ptr, 0)
self.curve2.setPos(-self.ptr, 0)
self.curve3.setPos(-self.ptr, 0)
self.curve4.setPos(-self.ptr, 0)
#----------------------------------------------------------------------
def updateMarketData(self, event):
"""更新行情"""
data = event.dict_['data']
print "update", data['InstrumentID']
symbol = data['InstrumentID']
tick = Tick(symbol)
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
tick.time = data['UpdateTime']
tick.ms = data['UpdateMillisec']
tick.bidPrice1 = data['BidPrice1']
tick.bidPrice2 = data['BidPrice2']
tick.bidPrice3 = data['BidPrice3']
tick.bidPrice4 = data['BidPrice4']
tick.bidPrice5 = data['BidPrice5']
tick.askPrice1 = data['AskPrice1']
tick.askPrice2 = data['AskPrice2']
tick.askPrice3 = data['AskPrice3']
tick.askPrice4 = data['AskPrice4']
tick.askPrice5 = data['AskPrice5']
tick.bidVolume1 = data['BidVolume1']
tick.bidVolume2 = data['BidVolume2']
tick.bidVolume3 = data['BidVolume3']
tick.bidVolume4 = data['BidVolume4']
tick.bidVolume5 = data['BidVolume5']
tick.askVolume1 = data['AskVolume1']
tick.askVolume2 = data['AskVolume2']
tick.askVolume3 = data['AskVolume3']
tick.askVolume4 = data['AskVolume4']
tick.askVolume5 = data['AskVolume5']
self.onTick(tick) # process the new tick
self.__recordTick(tick) # persist the tick
#----------------------------------------------------------------------
def onTick(self, tick):
"""tick数据更新"""
from datetime import time
# 首先生成datetime.time格式的时间(便于比较),从字符串时间转化为time格式的时间
hh, mm, ss = tick.time.split(':')
self.ticktime = time(int(hh), int(mm), int(ss), microsecond=tick.ms)
# 计算tick图的相关参数
if self.ptr == 0:
self.fastMA = tick.lastPrice
self.midMA = tick.lastPrice
self.slowMA = tick.lastPrice
else:
self.fastMA = (1-self.tickFastAlpha) * self.fastMA + self.tickFastAlpha * tick.lastPrice
self.midMA = (1-self.tickMidAlpha) * self.midMA + self.tickMidAlpha * tick.lastPrice
self.slowMA = (1-self.tickSlowAlpha) * self.slowMA + self.tickSlowAlpha * tick.lastPrice
self.listlastPrice[self.ptr] = int(tick.lastPrice)
self.listfastMA[self.ptr] = int(self.fastMA)
self.listmidMA[self.ptr] = int(self.midMA)
self.listslowMA[self.ptr] = int(self.slowMA)
self.ptr += 1
print(self.ptr)
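# Grow the preallocated arrays geometrically (doubling) once full so that
# per-tick appends stay O(1) amortized instead of reallocating every tick.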
if self.ptr >= self.listlastPrice.shape[0]:
tmp = self.listlastPrice
self.listlastPrice = np.empty(self.listlastPrice.shape[0] * 2)
self.listlastPrice[:tmp.shape[0]] = tmp
tmp = self.listfastMA
self.listfastMA = np.empty(self.listfastMA.shape[0] * 2)
self.listfastMA[:tmp.shape[0]] = tmp
tmp = self.listmidMA
self.listmidMA = np.empty(self.listmidMA.shape[0] * 2)
self.listmidMA[:tmp.shape[0]] = tmp
tmp = self.listslowMA
self.listslowMA = np.empty(self.listslowMA.shape[0] * 2)
self.listslowMA[:tmp.shape[0]] = tmp
# redraw
self.plotTick() # tick chart
#----------------------------------------------------------------------
def __connectMongo(self):
"""连接MongoDB数据库"""
try:
self.__mongoConnection = pymongo.MongoClient("localhost", 27017)
self.__mongoConnected = True
self.__mongoTickDB = self.__mongoConnection['VnTrader_Tick_Db']
except ConnectionFailure:
pass
#----------------------------------------------------------------------
def __recordTick(self, tick):
"""Insert tick data into MongoDB"""
if self.__mongoConnected:
# fix: onTick passes a Tick object, not a dict, so build a plain dict first
record = tick.__dict__.copy()
record['date'] = datetime.now().strftime('%Y%m%d')
self.__mongoTickDB[tick.symbol].insert(record)
# #----------------------------------------------------------------------
# def loadTick(self, symbol, startDate, endDate=None):
# """从MongoDB中读取Tick数据"""
# cx = self.__mongoTickDB[symbol].find()
# print cx.count()
# return cx
# # if self.__mongoConnected:
# # collection = self.__mongoTickDB[symbol]
# #
# # if an end date for reading ticks was supplied
# # if endDate:
# # cx = collection.find({'date': {'$gte': startDate, '$lte': endDate}})
# # else:
# # cx = collection.find({'date': {'$gte': startDate}})
# # return cx
# # else:
# # return None
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
print "connect"
self.signal.connect(self.updateMarketData)
self.__eventEngine.register(EVENT_MARKETDATA, self.signal.emit)
class Tick:
"""Tick数据对象"""
#----------------------------------------------------------------------
def __init__(self, symbol):
"""Constructor"""
self.symbol = symbol # contract symbol
self.openPrice = 0 # OHLC
self.highPrice = 0
self.lowPrice = 0
self.lastPrice = 0
self.volume = 0 # traded volume
self.openInterest = 0 # open interest
self.upperLimit = 0 # upper price limit
self.lowerLimit = 0 # lower price limit
self.time = '' # update time and milliseconds
self.ms = 0
self.bidPrice1 = 0 # market depth
self.bidPrice2 = 0
self.bidPrice3 = 0
self.bidPrice4 = 0
self.bidPrice5 = 0
self.askPrice1 = 0
self.askPrice2 = 0
self.askPrice3 = 0
self.askPrice4 = 0
self.askPrice5 = 0
self.bidVolume1 = 0
self.bidVolume2 = 0
self.bidVolume3 = 0
self.bidVolume4 = 0
self.bidVolume5 = 0
self.askVolume1 = 0
self.askVolume2 = 0
self.askVolume3 = 0
self.askVolume4 = 0
self.askVolume5 = 0 | mit |
redarmy30/Eurobot-2017 | old year/RESET-master/Machine_vision/get_position.py | 2 | 3426 | #!/usr/bin/env python2
import numpy as np
import cv2
from matplotlib import pyplot as plt
from math import sin, cos, tan, sqrt, pi, atan
from operator import itemgetter
import timeit
#start = timeit.timeit()
h = 0.37 #the vertical distance from the ground to camera [in meters]
alpha = pi*(28.3)/180.0 #the inclination angle in degrees
F = 0.25 # the focal distance [in meters]; the original comment also carried an alternative value, 0.00367
Nx = 640.0 #number of pixels along x axis on the focal plane
Ny = 480.0 #number of pixels along the y axis on the focal plane
psi = 78.0*pi/180.0 # maximum angular resolution in diagonal
Tetha = 2.0*atan((tan(psi/2.0))*3.0/5.0) # maximum resolution angle for vertical view
Fi = 2.0*atan((tan(psi/2.0))*4.0/5.0) # maximum resolution angle for horizontal view
#Initial calculations
gamma = pi/2.0 - alpha #calculate the inclination of focal plane
YM = F/cos(alpha) - h*tan(alpha)
YA = F*cos(alpha)
ZA = h - F*sin(alpha)
ksim = 2.0*F*tan(Fi/2.0) # focal-plane width; fix: use the horizontal FOV Fi (the original used Tetha, leaving Fi unused)
etham = 2.0*F*tan(Tetha/2.0) # focal-plane height from the vertical FOV Tetha
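# Note (added for clarity): ksim and etham are the physical width and height
# of the focal plane implied by the field-of-view angles, so a pixel (cx, cy)
# maps to focal-plane coordinates ksiE = cx*ksim/Nx, ethaE = cy*etham/Ny
# before being projected through the camera centre onto the ground plane.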
# camera initialisation
#DEFINE CLASSIFICATION OF OBJECTS
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 1
params.maxThreshold = 2000
# Filter by Area.
params.filterByArea = 1
params.minArea = 1000
params.maxArea = 100000
"""# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1"""
"""# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
params.maxConvexity = 1"""
"""# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0"""
"""#Filter by color
params.filterByColor = 1
params.blobColor = 0;0;0"""
#detector = cv2.SimpleBlobDetector_create(params)
detector = cv2.SimpleBlobDetector(params) #- use this if line 57 returns error!!!
class GetObjectPosition(object):
def get_position(self):
cap = cv2.VideoCapture(0)
#cap.set(7, 15)
_, frame = cap.read()
im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
keypoints = detector.detect(im)
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
a = len(keypoints)
points = []
screenpoints = []
for keypoint in keypoints:
x0 = keypoint.pt[0]
y0 = keypoint.pt[1]
cx = x0
cy = y0
ksiE = cx*ksim/Nx
ethaE = cy*etham/Ny
Nx1 = ksim
Ny1 = etham
ksiA = ksiE - Nx1/2.0
ethaA = -(ethaE - Ny1/2.0)
YA1 = YA + ethaA*cos(gamma)
ZA1 = (YA1 - YM)*tan(gamma)
XA1 = ksiA
t = h/(h-ZA1)
X = XA1*t
Y = YA1*t
R0 = sqrt(X**2.0+Y**2.0)
X = int(X*1000.0)
Y = int(Y*1000.0)
R0 = int(R0*1000.0)
points.append((X, Y, R0))
screenpoints.append((x0,y0))
points1 = str(points)
cv2.imwrite('result.png',im_with_keypoints)
if not points:
return
z = sorted(points, key=itemgetter(2))
z1 = str(z)
b = z[0]
points = str(points)
file = open("result.txt", "w")
file.write("unsorted list")
file.write(points)#unsorted
file.write("\n")
file.write("sorted list")
file.write(z1)#sorted
file.write("\n")
file.write("The nearest objest is:")
file.write(str(b))
file.close()
img1 = cv2.imread('result.png')
cv2.putText(img1, points, (0, int(Ny)), cv2.FONT_HERSHEY_DUPLEX, 0.5, (1, 1, 255))
cv2.imwrite('result1.png', img1)
cap.release()
return b
a = GetObjectPosition()
coordinates = a.get_position()
print coordinates
#end = timeit.timeit()
#print end - start | mit |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_cont/Geneva_noRot_cont_age5/UV2.py | 33 | 7365 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] = 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
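# max_values layout per emission line: [0] peak log ratio, [1] flat index of
# the peak, [2] hdens at the peak, [3] phi at the peak.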
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
    add_sub_plot(i+1)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
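# Any commented option below can be enabled by uncommenting it and assigning a
# value, e.g. (illustrative values only):
# c.ZMQTerminalIPythonApp.log_level = 20
# c.ZMQTerminalInteractiveShell.confirm_exit = False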
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of string where the first element is the
# command itself and reminders are the options for the command. Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporary file and the program is called with the temporary file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of string where the first element is the
# command itself and reminders are the options for the command. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
erscott/Wellderly | SWGR_v1.0/masterVar_chr_split.py | 1 | 3344 | '''
Splits Complete Genomics masterVar files into chromosome specific masterVar
files when given an input file path and an output directory path.
e.g. >python masterVar_chr_split.py -i /path/to/masterVar.tsv.bz2 -o /path/to/output_dir/
Python package dependencies:
pandas, numpy
python 2.7 for argparse module
'''
import pandas as pd
import os, sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', type=str,help='Specifies the input file, /path/to/CG_data/masterVar.tsv.bz2')
parser.add_argument('-o', type=str,help='Specifies the output directory, e.g. /path/to/CG_data/chromosome/')
def chr_split_mastervar(f_path, target_path):
#Get header for masterVar
header = os.popen('bzcat ' + f_path+ ' | head -100 | grep chromosome -n').readlines()
#Creating Reader object for iterating through NA12878 CG masterVar file
skip_rows = int(header[0].split(":")[0]) -1
mastervar_headings = os.popen('bzcat ' + f_path + ' | head -' + str(skip_rows)).readlines()
#Creating pandas dataframe with chunksize 200,000 lines
column_names = header[0].rstrip('\n').split(":")[1].split("\t") #column labels parsed from the grepped header line
chunk = pd.read_table(f_path, chunksize=200000, sep="\t", skiprows=skip_rows + 1, names=column_names, compression='bz2', dtype=object)
prev_chr = 'chr1' #tracking chromosome position
prev_target_write_file = None
for mastervar in chunk: #iterate through mastervar file
for current_chrom,chr_df in mastervar.groupby(['chromosome']): #split dataframe by chromosome for writing
#check for increment to new chromosome
if prev_chr != current_chrom:
os.system('bzip2 ' + prev_target_write_file) #compress last chromosome file
prev_chr = current_chrom
#specifying output file path and chromosome-specific name
file_name = f_path.split("/")[-1].rstrip(".tsv.bz2") #getting file prefix
target_path = target_path.rstrip("/")+"/" #ensuring target path ends with fwd slash
write_target_file_path = target_path +file_name + "_" + current_chrom +".tsv" #specify target directory and chrom file name
#print write_target_file_path
if len(os.popen('find '+ write_target_file_path + '').readlines()) == 0: #checking for output file
os.system('bzcat '+ f_path + '| head -' + str(skip_rows) + " > " +write_target_file_path) #writing header if no output file found
chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a') #writing chromosome specific variants to output file
else: #Suppress header if target file found
chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a', header=False) #writing chromosome specifc variants to output file w/o header
prev_target_write_file = write_target_file_path #increment to current write_target_file_path
return 'complete'
opts = parser.parse_known_args()
f_path, target_path = opts[0].i, opts[0].o
assert f_path.split(".")[-2:] == ['tsv','bz2'], "expecting masterVar input file suffix .tsv.bz2"
test = chr_split_mastervar(f_path, target_path)
if test == 'complete':
print 'All chromosomes processed'
| bsd-3-clause |
jeremyclover/airflow | airflow/hooks/base_hook.py | 20 | 1812 | from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.utils import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
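# Connections may also be supplied via environment variables using this prefix;
# e.g. (illustrative URI): AIRFLOW_CONN_MY_DB=mysql://user:pass@host:3306/schema
# makes get_connection('my_db') build a Connection from that URI (see below).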
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
    raise NotImplementedError()
def get_records(self, sql):
    raise NotImplementedError()
def get_pandas_df(self, sql):
    raise NotImplementedError()
def run(self, sql):
    raise NotImplementedError()
| apache-2.0 |
shangwuhencc/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
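# the covariance and precision matrices should be inverses of each other,
# so their product should be the identity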
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
panmari/tensorflow | tensorflow/examples/skflow/boston.py | 1 | 1485 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing
from tensorflow.contrib import skflow
# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target
# Split dataset into train / test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2, random_state=42)
# scale data (training set) to 0 mean and unit Std. dev
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
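# The fitted scaler is reused below (via transform) so the test set is scaled
# with the training-set statistics.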
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
steps=5000, learning_rate=0.1, batch_size=1)
# Fit
regressor.fit(X_train, y_train)
# Predict and score
score = metrics.mean_squared_error(regressor.predict(scaler.transform(X_test)), y_test)
print('MSE: {0:f}'.format(score))
| apache-2.0 |
wkerzendorf/wsynphot | wsynphot/base.py | 1 | 15987 | # defining the base filter curve classes
import os
from scipy import interpolate
from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D
import pandas as pd
from wsynphot.io.cache_filters import load_filter_index, load_transmission_data
from astropy import units as u, constants as const
from astropy import utils
import numpy as np
from wsynphot.calibration import get_vega_calibration_spectrum
def calculate_filter_flux_density(spectrum, filter):
"""
Calculate the (unnormalized) filter flux density by evaluating the integral
..math::
    F = \\int f_\\lambda(\\lambda) S(\\lambda) \\lambda d\\lambda
where :math:`S(\\lambda)` is the filter transmission; dividing this result by
calculate_wavelength_delta gives the mean flux density.
Parameters
----------
spectrum: ~specutils.Spectrum1D
    spectrum object
filter: ~wsynphot.FilterCurve
Returns
-------
    ~astropy.units.Quantity
"""
filtered_spectrum = filter * spectrum
filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength,
filtered_spectrum.wavelength)
return filter_flux_density
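# Minimal usage sketch (filter_id and the spectrum arrays are illustrative):
# >>> filt = FilterCurve.load_filter('bessell/b')
# >>> spec = Spectrum1D.from_array(wavelength, flux)  # astropy quantities
# >>> flux_density = calculate_filter_flux_density(spec, filt)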
def calculate_vega_magnitude(spectrum, filter):
filter_flux_density = calculate_filter_flux_density(spectrum, filter)
wavelength_delta = filter.calculate_wavelength_delta()
filtered_f_lambda = (filter_flux_density / wavelength_delta)
zp_vega_f_lambda = filter.zp_vega_f_lambda
return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda)
def calculate_ab_magnitude(spectrum, filter):
filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) /
filter.calculate_wavelength_delta())
return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda)
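# Both magnitude helpers implement m = -2.5 * log10(<f_lambda> / f_lambda_zp),
# differing only in the zero point: the Vega spectrum's mean flux density
# through the filter vs. the AB zero point derived from f_nu = 3631 Jy.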
def list_filters():
"""
List available filter sets along with their properties
"""
return load_filter_index()
class BaseFilterCurve(object):
"""
Basic filter curve class
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength for filter curve
transmission_lambda: numpy.ndarray
transmission_lambda for filter curve
interpolation_kind: str
allowed interpolation kinds given in scipy.interpolate.interp1d
"""
@classmethod
def load_filter(cls, filter_id=None, interpolation_kind='linear'):
"""
Parameters
----------
filter_id: str or None
if None is provided will return a DataFrame of all filters
interpolation_kind: str
see scipy.interpolation.interp1d
"""
if filter_id is None:
return list_filters()
else:
filter = load_transmission_data(filter_id)
wavelength_unit = 'angstrom'
wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit)
return cls(wavelength, filter['Transmission'].values,
interpolation_kind=interpolation_kind,
filter_id=filter_id)
def __init__(self, wavelength, transmission_lambda,
interpolation_kind='linear', filter_id=None):
if not hasattr(wavelength, 'unit'):
raise ValueError('the wavelength needs to be an astropy quantity')
self.wavelength = wavelength
self.transmission_lambda = transmission_lambda
self.interpolation_object = interpolate.interp1d(self.wavelength,
self.transmission_lambda,
kind=interpolation_kind,
bounds_error=False,
fill_value=0.0)
self.filter_id = filter_id
def __mul__(self, other):
if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'):
raise ValueError('requiring a specutils.Spectrum1D-like object that '
                 'has attributes "flux" and "wavelength"')
#new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value,
# self.wavelength.value) * self.wavelength.unit
transmission = self.interpolate(other.wavelength)
return Spectrum1D.from_array(other.wavelength, transmission * other.flux)
def __rmul__(self, other):
return self.__mul__(other)
@utils.lazyproperty
def lambda_pivot(self):
"""
Calculate the pivotal wavelength as defined in Bessell & Murphy 2012
.. math::
\\lambda_\\textrm{pivot} = \\sqrt{
\\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda}}}\\\\
<f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c}
"""
return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/
(np.trapz(self.transmission_lambda / self.wavelength, self.wavelength))))
@utils.lazyproperty
def wavelength_start(self):
return self.get_wavelength_start()
@utils.lazyproperty
def wavelength_end(self):
return self.get_wavelength_end()
@utils.lazyproperty
def zp_ab_f_lambda(self):
return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to(
'erg/s/cm^2/Angstrom', u.spectral())
@utils.lazyproperty
def zp_ab_f_nu(self):
return (3631 * u.Jy).to('erg/s/cm^2/Hz')
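# The AB system is defined by a constant f_nu of 3631 Jy; zp_ab_f_lambda above
# converts it to an f_lambda zero point via f_lambda = f_nu * c / lambda_pivot^2.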
@utils.lazyproperty
def zp_vega_f_lambda(self):
return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) /
self.calculate_wavelength_delta())
def interpolate(self, wavelength):
"""
Interpolate the filter onto new wavelength grid
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength grid to interpolate on
"""
converted_wavelength = wavelength.to(self.wavelength.unit)
return self.interpolation_object(converted_wavelength)
def _calculate_flux_density(self, wavelength, flux):
    return calculate_filter_flux_density(Spectrum1D.from_array(wavelength, flux), self)
def calculate_flux_density(self, spectrum):
return calculate_filter_flux_density(spectrum, self)
def calculate_f_lambda(self, spectrum):
return (self.calculate_flux_density(spectrum) /
self.calculate_wavelength_delta())
def calculate_wavelength_delta(self):
"""
Calculate the integral :math:`\\int S(\\lambda) \\lambda d\\lambda`
Returns
-------
    ~astropy.units.Quantity
"""
return np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength)
def calculate_weighted_average_wavelength(self):
"""
Calculate the weighted average wavelength
:math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}`
Returns
-------
    ~astropy.units.Quantity
"""
return (np.trapz(self.transmission_lambda * self.wavelength,
                 self.wavelength) /
        np.trapz(self.transmission_lambda, self.wavelength))
def calculate_vega_magnitude(self, spectrum):
__doc__ = calculate_vega_magnitude.__doc__
return calculate_vega_magnitude(spectrum, self)
def calculate_ab_magnitude(self, spectrum):
__doc__ = calculate_ab_magnitude.__doc__
return calculate_ab_magnitude(spectrum, self)
def convert_ab_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_ab_f_lambda
def convert_vega_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_vega_f_lambda
def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={},
format_filter_id=None):
if scale_max is not None:
if hasattr(scale_max, 'unit'):
scale_max = scale_max.value
transmission = (self.transmission_lambda * scale_max
/ self.transmission_lambda.max())
else:
transmission = self.transmission_lambda
ax.plot(self.wavelength, transmission, **plot_kwargs)
ax.set_xlabel('Wavelength [{0}]'.format(
self.wavelength.unit.to_string(format='latex')))
ax.set_ylabel('Transmission [1]')
if make_label and self.filter_id is not None:
if format_filter_id is not None:
filter_id = format_filter_id(self.filter_id)
else:
filter_id = self.filter_id
text_x = (self.lambda_pivot).value
text_y = transmission.max()/2
ax.text(text_x, text_y, filter_id,
horizontalalignment='center', verticalalignment='center',
bbox=dict(facecolor='white', alpha=0.5))
def get_wavelength_start(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(threshold)]
def get_wavelength_end(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)]
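# get_wavelength_start/get_wavelength_end bound the filter support by
# thresholding the normalized cumulative transmission (default: the 1% tails).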
class FilterCurve(BaseFilterCurve):
def __repr__(self):
if self.filter_id is None:
filter_id = "{0:x}".format(self.__hash__())
else:
filter_id = self.filter_id
return "FilterCurve <{0}>".format(filter_id)
class FilterSet(object):
"""
A set of filters
Parameters
----------
filter_set: ~list
a list of strings or a list of filters
interpolation_kind: ~str
scipy interpolaton kinds
"""
def __init__(self, filter_set, interpolation_kind='linear'):
if hasattr(filter_set[0], 'wavelength'):
self.filter_set = filter_set
else:
self.filter_set = [FilterCurve.load_filter(filter_id,
interpolation_kind=
interpolation_kind)
for filter_id in filter_set]
def __iter__(self):
self.current_filter_idx = 0
return self
def __next__(self):
try:
item = self.filter_set[self.current_filter_idx]
except IndexError:
raise StopIteration
self.current_filter_idx += 1
return item
next = __next__
def __getitem__(self, item):
return self.filter_set.__getitem__(item)
def __repr__(self):
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(
[item.filter_id
for item in self.filter_set]))
@property
def lambda_pivot(self):
return u.Quantity([item.lambda_pivot for item in self])
def calculate_f_lambda(self, spectrum):
return u.Quantity(
[item.calculate_f_lambda(spectrum) for item in self.filter_set])
def calculate_ab_magnitudes(self, spectrum):
mags = [item.calculate_ab_magnitude(spectrum)
for item in self.filter_set]
return mags
def calculate_vega_magnitudes(self, spectrum):
mags = [item.calculate_vega_magnitude(spectrum)
for item in self.filter_set]
return mags
def convert_ab_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_ab_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_vega_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def plot_spectrum(self, spectrum, ax, make_labels=True,
spectrum_plot_kwargs={}, filter_plot_kwargs={},
filter_color_list=None, format_filter_id=None):
"""
plot a spectrum with the given filters
spectrum:
ax:
make_labels:
:return:
"""
ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs)
for i, filter in enumerate(self.filter_set):
filter_scale = filter.calculate_f_lambda(spectrum)
if filter_color_list is not None:
filter_plot_kwargs['color'] = filter_color_list[i]
filter.plot(ax, scale_max=filter_scale, make_label=make_labels,
plot_kwargs=filter_plot_kwargs,
format_filter_id=format_filter_id)
class MagnitudeSet(FilterSet):
def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None,
interpolation_kind='linear'):
super(MagnitudeSet, self).__init__(filter_set,
interpolation_kind=
interpolation_kind)
self.magnitudes = np.array(magnitudes)
self.magnitude_uncertainties = np.array(magnitude_uncertainties)
def __repr__(self):
mag_str = '{0} {1:.4f} +/- {2:.4f}'
mag_data = []
for i, filter in enumerate(self.filter_set):
unc = (np.nan if self.magnitude_uncertainties is None
else self.magnitude_uncertainties[i])
mag_data.append(mag_str.format(filter.filter_id,
self.magnitudes[i], unc))
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(mag_data))
| bsd-3-clause |
ifuding/Kaggle | PMRCN/Code/siamese_net.py | 1 | 22230 |
from sklearn import *
import sklearn
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
from time import gmtime, strftime
import numpy.random as rng
from multiprocessing.dummy import Pool
import h5py
import concurrent.futures
import tensorflow as tf
import multiprocessing as mp
from sklearn.cross_validation import KFold
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Input, concatenate, merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
graph = tf.get_default_graph()
HIDDEN_UNITS = [64, 16, 8]
DNN_EPOCHS = 40
BATCH_SIZE = 5
DNN_BN = True
DROPOUT_RATE = 0.5
SIAMESE_PAIR_SIZE = 100000
MAX_WORKERS = 8
EMBEDDING_SIZE = 6
full_feature = True
data_folder = '../Data/'
train = pd.read_csv(data_folder + 'training_variants')
#print train.dtypes
test = pd.read_csv(data_folder + 'test_variants')
trainx = pd.read_csv(data_folder + 'training_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
#print trainx.dtypes
testx = pd.read_csv(data_folder + 'test_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
train = pd.merge(train, trainx, how='left', on='ID').fillna('')
#train = train.iloc[1:1000]
y = train['Class'].values
train = train.drop(['Class'], axis=1)
test = pd.merge(test, testx, how='left', on='ID').fillna('')
pid = test['ID'].values
#df_all = pd.concat((train, test), axis=0, ignore_index=True)
#df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#
#print df_all[['Gene_Share', 'Variation_Share']].max()
## exit(0)
#if full_feature:
# #commented for Kaggle Limits
# for i in range(5):
# df_all['Gene_'+str(i)] = df_all['Gene'].map(lambda x: str(x[i]) if len(x)>i else '')
# df_all['Variation'+str(i)] = df_all['Variation'].map(lambda x: str(x[i]) if len(x)>i else '')
# print df_all.dtypes
#
# gen_var_lst = sorted(list(train.Gene.unique()) + list(train.Variation.unique()))
# print(len(gen_var_lst))
# gen_var_lst = [x for x in gen_var_lst if len(x.split(' '))==1]
# print(len(gen_var_lst))
# i_ = 0
# #commented for Kaggle Limits
# for gen_var_lst_itm in gen_var_lst:
# if i_ % 100 == 0: print(i_)
# df_all['GV_'+str(gen_var_lst_itm)] = df_all['Text'].map(lambda x: str(x).count(str(gen_var_lst_itm))).astype(np.int8)
# i_ += 1
# if i_ == 5:
# break
#
#for c in df_all.columns:
# if df_all[c].dtype == 'object':
# if c in ['Gene','Variation']:
# lbl = preprocessing.LabelEncoder()
# df_all[c+'_lbl_enc'] = lbl.fit_transform(df_all[c].values)
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
# elif c != 'Text':
# lbl = preprocessing.LabelEncoder()
# df_all[c] = lbl.fit_transform(df_all[c].values)
# if c=='Text':
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
#
#train = df_all.iloc[:len(train)]
#print "... train dtypes before svd ..."
#print train.dtypes
#print train.head()
#exit(0)
#test = df_all.iloc[len(train):]
#
#class cust_regression_vals(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# x = x.drop(['Gene', 'Variation','ID','Text'],axis=1).values
# return x
#
#class cust_txt_col(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def __init__(self, key):
# self.key = key
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# return x[self.key].apply(str)
#
#print('Pipeline...')
#fp = pipeline.Pipeline([
# ('union', pipeline.FeatureUnion(
# n_jobs = -1,
# transformer_list = [
# ('standard', cust_regression_vals()),
# ('pi1', pipeline.Pipeline([('Gene', cust_txt_col('Gene')), ('count_Gene', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd1', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# ('pi2', pipeline.Pipeline([('Variation', cust_txt_col('Variation')), ('count_Variation', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd2', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# #commented for Kaggle Limits
# ('pi3', pipeline.Pipeline([('Text', cust_txt_col('Text')), ('tfidf_Text', feature_extraction.text.TfidfVectorizer(ngram_range=(1, 2))), ('tsvd3', decomposition.TruncatedSVD(n_components=50, n_iter=25, random_state=12))]))
# ])
# )])
#
#train = fp.fit_transform(train);
#print type(train)
#print(train.shape)
#print (train.nbytes)
#np.save("train_array", train)
## print(df.dtypes)
## print(df.memory_usage())
#test = fp.transform(test); print(test.shape)
#np.save("test_array", test)
#exit(0)
train = np.load("./train_array.npy")
test = np.load("./test_array.npy")
# siamese_features_array = np.load("./siamese_features_array_2017_09_15_07_57_44.npy")
y = y - 1 #fix for zero bound array
CONTINUOUS_INDICES = []
SPARSE_INDICES = []
for i in range((train.shape)[1]):
if (i >= 3205 and i <= 3212):
pass
elif (i >= 2 and i <= 113): # or (i >= 114 and i <= 3204):
SPARSE_INDICES.append(i)
else:
CONTINUOUS_INDICES.append(i)
#train = train[:, CONTINUOUS_INDICES]
#test = test[:, CONTINUOUS_INDICES]
print('train shape after loading and selecting training columns: %s' % str(train.shape))
siamese_train_len = len(train) // 3
print('siamese_train_len is %d' % (siamese_train_len))
siamese_train_data = train[:siamese_train_len]
siamese_train_label = y[:siamese_train_len]
lgbm_train_data = train[siamese_train_len:]
lgbm_train_label = y[siamese_train_len:]
#train = train[:200]
#y = y[:200]
#test = test[:200]
#pid = pid[:200]
def xgbTrain(train_data, train_label, fold = 5):
"""
Train one XGBoost model per fold, seeding each train/valid split by the fold index.
"""
denom = 0
fold = 5 #Change to 5, 1 for Kaggle Limits
models = []
for i in range(fold):
params = {
'eta': 0.03333,
'max_depth': 4,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'num_class': 9,
'seed': i,
'silent': True
}
x1, x2, y1, y2 = model_selection.train_test_split(train_data, train_label, test_size=0.18, random_state=i)
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
model = xgb.train(params, xgb.DMatrix(x1, y1), 1000, watchlist, verbose_eval=50, early_stopping_rounds=100)
score1 = metrics.log_loss(y2, model.predict(xgb.DMatrix(x2), ntree_limit=model.best_ntree_limit), labels = list(range(9)))
#print(score1)
models.append((model, 'x'))
return models
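# Hedged usage sketch (not in the original script): xgbTrain returns
# (booster, 'x') tuples that model_eval/gen_sub below know how to evaluate.
# models = xgbTrain(lgbm_train_data, lgbm_train_label)
# preds = model_eval(models[0][0], 'x', test)   # shape (n_samples, 9)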
def lgbm_train(train_data, train_label, fold = 5):
"""
LGB Training
"""
# print train.shape
# print siamese_features_array.shape
# train_merge = siamese_features_array #np.concatenate((train, siamese_features_array), axis = 1)
# print train_merge.shape
# # exit(0)
print("Over all training size:")
print(train_data.shape)
# train_data = train_merge#[:train_len * 3 / 10]
# train_label = lgbm_train_label#[:train_len * 3 / 10]
#valide_data = train_merge[train_len * 9 / 10:]
#valide_label = y[train_len * 9 / 10:]
models = []
for i in range(fold):
d_train = lgb.Dataset(train_data, train_label) #, categorical_feature = SPARCE_INDICES)
#d_valide = lgb.Dataset(valide_data, valide_label)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'multiclass',
'metric': {'multi_logloss'},
'num_class': 9,
# 'num_leaves': 256,
# 'max_depth': 12,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.95,
# 'bagging_freq': 5,
'num_leaves': 60, # 60,
# 'min_sum_hessian_in_leaf': 20,
'max_depth': 10, # 10,
'learning_rate': 0.02, # 0.02,
'feature_fraction': 0.5,
'verbose': 0,
# 'valid_sets': [d_valide],
'num_boost_round': 327,
'feature_fraction_seed': i,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 15,
# 'bagging_seed': i,
# 'early_stopping_round': 10
# 'random_state': 10
# 'verbose_eval': 20
#'min_data_in_leaf': 665
}
# ROUNDS = 1
print('fold: %d th light GBM train :-)' % (i))
# params['feature_fraction_seed'] = i
#bst = lgb.train(
# params ,
# d_train,
# verbose_eval = False
# # valid_sets = [d_valide]
# #num_boost_round = 1
# )
cv_result = lgb.cv(params, d_train, nfold=10)
pd.DataFrame(cv_result).to_csv('cv_result', index = False)
exit(0)
# pred = model_eval(bst, 'l', test)
#print pred.shape
#print pred[0, :]
models.append((bst, 'l'))  # unreachable while exit(0) above is active; bst is
# defined only by the commented-out lgb.train call
return models
def create_model(input_len):
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_len))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
# model.add(Dropout(0.1))
#model.add(Dense(32, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(9, activation='softmax'))
# optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def create_embedding_model(CONTINUE_SIZE, SPARSE_SIZE):
"""
"""
print('CONTINUOUS_SIZE = %d' % CONTINUE_SIZE)
print('SPARSE_SIZE = %d' % SPARSE_SIZE)
sparse_feature = Input(shape=(SPARSE_SIZE,))
sparse_embedding = Embedding(55, EMBEDDING_SIZE, input_length = SPARSE_SIZE)(sparse_feature)
sparse_embedding = Reshape((EMBEDDING_SIZE * SPARSE_SIZE,))(sparse_embedding)
# print "model input size: %d" % CONTINUOUS_COLUMNS
dense_input = Input(shape=(CONTINUE_SIZE,))
merge_input = concatenate([dense_input, sparse_embedding], axis = 1)
merge_len = CONTINUE_SIZE + EMBEDDING_SIZE * SPARSE_SIZE
output = create_model(merge_len)(merge_input)
model = Model([dense_input, sparse_feature], output)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
# optimizer = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer = Adam(),
loss='categorical_crossentropy', metrics = ['accuracy'])
return model
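# Hedged smoke test (not in the original script): checks the embedding model's
# expected input/output shapes with tiny toy sizes; 55 is the embedding
# vocabulary size hard-coded above, 9 the number of classes.
def _demo_embedding_model_shapes():
    model = create_embedding_model(4, 3)
    dense = np.random.rand(2, 4)
    sparse = np.random.randint(0, 55, size=(2, 3))
    preds = model.predict([dense, sparse], batch_size=2)
    assert preds.shape == (2, 9)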
def keras_train(train_data, train_target, nfolds = 10):
"""
Train per-fold Keras embedding models on the continuous/sparse feature split.
"""
print("Start gen training data, shuffle and normalize!")
#train_data = train
train_target = np_utils.to_categorical(train_target)
# train_data, train_target, siamese_data_loader = siamese_train(siamese_train_data, siamese_train_label)
kf = KFold(len(train_target), n_folds=nfolds, shuffle=True)
num_fold = 0
models = []
for train_index, test_index in kf:
# model = create_model(classes = 2)
model = create_embedding_model(len(CONTINUOUS_INDICES), len(SPARSE_INDICES))
# model = create_siamese_net((train.shape)[1])
X_train = train_data[train_index]
Y_train = train_target[train_index]
print('Positive samples in train: %d' % np.sum(Y_train))
print('Negative samples in train: %d' % (len(Y_train) - np.sum(Y_train)))
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
print('Positive samples in valid: %d' % np.sum(Y_valid))
print('Negative samples in valid: %d' % (len(Y_valid) - np.sum(Y_valid)))
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
callbacks = [
EarlyStopping(monitor='val_loss', patience=5, verbose=0),
]
model.fit([X_train[:, CONTINUOUS_INDICES], X_train[:, SPARSE_INDICES]],
Y_train, batch_size=BATCH_SIZE, epochs=DNN_EPOCHS,
shuffle=True, verbose=2,
validation_data=([X_valid[:, CONTINUOUS_INDICES], X_valid[:, SPARSE_INDICES]], Y_valid)
, callbacks=callbacks)
model_name = 'keras' + strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
#model.save_weights(model_name)
#siamese_features_array = gen_siamese_features(model, lgbm_train_data, siamese_train_data, siamese_train_label)
models.append((model, 'k'))
break
return models #, siamese_features_array
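# Hedged usage sketch (not in the original script): keras models returned by
# keras_train must be fed the same continuous/sparse column split used in fit:
# models = keras_train(train, y, 10)
# preds = model_eval(models[0][0], 'k',
#                    [test[:, CONTINUOUS_INDICES], test[:, SPARSE_INDICES]])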
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k':
preds = model.predict(data_frame, batch_size=BATCH_SIZE, verbose=2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit+80)
return preds
def gen_sub(models, merge_features):
"""
Evaluate single Type model
"""
print('Start generate submission!')
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, merge_features)
#print pred.shape
#print pred[0, :]
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
submission = pd.DataFrame(preds, columns=['class'+str(c+1) for c in range(9)])
submission['ID'] = pid
sub_name = "submission" + strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) + ".csv"
print('Output to ' + sub_name)
submission.to_csv(sub_name, index=False)
def create_siamese_net(input_size):
"""
"""
input_shape = (input_size, )
left_input = Input(input_shape)
right_input = Input(input_shape)
#build model to use in each siamese 'leg'
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_size))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
#encode each of the two inputs into a vector with the convnet
encoded_l = model(left_input)
encoded_r = model(right_input)
#merge two encoded inputs with the l1 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l,encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
merge_layer = Dense(HIDDEN_UNITS[2],activation='sigmoid')(both)
prediction = Dense(1,activation='sigmoid')(merge_layer)
siamese_net = Model(input=[left_input,right_input],output=prediction)
#optimizer = SGD(0.0004,momentum=0.6,nesterov=True,decay=0.0003)
optimizer = Adam()
#//TODO: get layerwise learning rates and momentum annealing scheme described in paperworking
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
# print siamese_net.count_params()
return siamese_net
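# Hedged smoke test (not in the original script) for the siamese network: one
# similarity score in [0, 1] per (left, right) pair.
def _demo_siamese_net_shapes():
    net = create_siamese_net(16)
    left = rng.rand(4, 16)
    right = rng.rand(4, 16)
    assert net.predict([left, right]).shape == (4, 1)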
class Siamese_Loader:
#For loading batches and testing tasks to a siamese net
def __init__(self,Xtrain,Xval = None):
self.Xval = Xval
self.Xtrain = Xtrain
self.n_classes = Xtrain.shape[0]
self.feature_size = (Xtrain[0].shape)[1]
self.n_examples = np.array([x.shape[0] for x in Xtrain])
self.n_tot_examples = np.sum(self.n_examples)
print('examples of different classes: %s' % str(self.n_examples))
# self.n_val,self.n_ex_val,_,_ = Xval.shape
def get_batch(self,n):
#Create batch of pairs, half same class, half different class
categories = rng.choice(self.n_classes,size=(n,),replace=True)
pairs=np.zeros((2, n, self.feature_size))
targets=np.zeros((n,))
positive_begin_pos = n * 1 // 2
targets[positive_begin_pos:] = 1
categories_list = []
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0, self.n_examples[category])
pairs[0][i] = self.Xtrain[category][idx_1] #.reshape(self.feature_size)
#pick images of same class for 1st half, different for 2nd
category_2 = category if i >= positive_begin_pos else (category + rng.randint(1,self.n_classes)) % self.n_classes
idx_2 = rng.randint(0,self.n_examples[category_2])
while i >= positive_begin_pos and idx_2 == idx_1:
idx_2 = rng.randint(0,self.n_examples[category_2])
pairs[1][i] = self.Xtrain[category_2][idx_2] #.reshape(self.w,self.h,1)
categories_list.append((category, category_2))
#pd.DataFrame(categories_list).to_csv('categories', index=False)
#exit(0)
# shuffle pairs and targets with the same permutation so positives and
# negatives are mixed but each pair stays aligned with its target
perm = rng.permutation(n)
pairs = pairs[:, perm]
targets = targets[perm]
return pairs, targets
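# Hedged illustration (not in the original script) of the batch layout produced
# by get_batch: half different-class pairs, half same-class pairs.
def _demo_siamese_loader():
    toy = np.array([rng.rand(5, 3) for _ in range(9)])  # 9 classes, 5 examples each
    loader = Siamese_Loader(toy)
    pairs, targets = loader.get_batch(6)
    assert pairs.shape == (2, 6, 3)
    assert targets.sum() == 3  # exactly half the pairs are positives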
def gen_test_on_support_data(Xsupport, Xtest):
"""
"""
n_support, feature_size = Xsupport.shape
pairs = np.zeros((2, n_support, feature_size))
pairs[0] = Xtest
pairs[1] = Xsupport
return list(pairs)
def siamese_train(siamese_train_data, siamese_train_label):
"""
"""
train_data = [[] for i in range(9)]
label_ind = 0
for feature in siamese_train_data:
train_data[siamese_train_label[label_ind]].append(feature)
label_ind += 1
train_data = np.array([np.array(xi) for xi in train_data])
print("train data shape before gen pair")
print(train_data.shape)
siamese_data_loader = Siamese_Loader(train_data, test)
pairs, targets = siamese_data_loader.get_batch(SIAMESE_PAIR_SIZE)
return pairs, targets, siamese_data_loader
def gen_siamese_features_meta(model, Xsupport_label, Xsupport, Xtest):
"""
"""
siamese_pair = gen_test_on_support_data(Xsupport, Xtest)
global graph
with graph.as_default():
preds = model.predict(siamese_pair, batch_size=BATCH_SIZE, verbose=2)
preds = np.insert(preds, 1, Xsupport_label, axis = 1)
preds = pd.DataFrame(preds, columns = ['sim', 'class'])
siamese_features = preds.groupby('class', sort = False) \
.agg({'sim': ['max', 'min', 'median', 'mean', 'std']})
max_class = siamese_features['sim']['max'].idxmax()
siamese_features = np.insert(siamese_features.values.flatten(), 0, max_class, axis = 0)
return siamese_features
def gen_siamese_features(siamese_model, Xtest, Xsupport, Xsupport_label):
"""
"""
if MAX_WORKERS <= 0:
print("MAX_WORKERS should >= 1", file=sys.stderr)
exit(1)
siamese_features_array = list(range(len(Xtest)))
test_begin = 0
while test_begin < len(Xtest):
test_end = min(test_begin + MAX_WORKERS, len(Xtest))
with concurrent.futures.ThreadPoolExecutor(max_workers = MAX_WORKERS) as executor:
future_predict = {executor.submit(gen_siamese_features_meta, siamese_model,
Xsupport_label,
Xsupport,
Xtest[ind]): ind for ind in range(test_begin, test_end)}
for future in concurrent.futures.as_completed(future_predict):
ind = future_predict[future]
try:
siamese_features = future.result()
siamese_features_array[ind] = siamese_features
except Exception as exc:
print('%dth feature generated an exception: %s' % (ind, exc))
test_begin = test_end
if test_begin % 100 == 0:
print('Gen %d siamese features' % test_begin)
if test_begin != len(Xtest):
print("Only gen %d siamese features" % test_begin, file=sys.stderr)
exit(1)
siamese_features_array = np.array(siamese_features_array)
pd.DataFrame(siamese_features_array[:, 0]).astype(np.int8).to_csv('pred_label', index = False)
return siamese_features_array
if __name__ == "__main__":
model_k = keras_train(train, y, 10)
#np.save("siamese_features_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_array)
# gen_sub(model_k, 'k', th, F1)
# ind = np.array([i * 5 for i in range(9)])
# xgbTrain(siamese_features_array[:, ind], lgbm_train_label);
#lgbm_features = siamese_features_array #np.concatenate((lgbm_train_data, siamese_features_array),
# model_l = lgbm_train(train, y, 10) #lgbm_features, lgbm_train_label, 10)#model_k)
# siamese_features_test_array = siamese_test(model_k[0][0], test)
#np.save("siamese_features_test_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_test_array)
##model_x = xgbTrain(5)#model_k)
#gen_sub(model_l, siamese_features_test_array) #model_k)
| apache-2.0 |
fdudatamining/framework | tests/draw/test_simple.py | 1 | 1233 | import numpy as np
import pandas as pd
from unittest import TestCase
from framework import draw
X = np.array([1, 2, 3, 4, 5])
class TestSimplePlots(TestCase):
def test_kinds(self):
self.assertIsNotNone(draw.draw_kinds)
def test_line(self):
draw.draw(clear=True, kind='line', x=X, y=X)
draw.draw(clear=True, kind='line', y=X)
def test_scatter(self):
draw.draw(clear=True, kind='scatter', x=X, y=X)
draw.draw(clear=True, kind='scatter', y=X)
def test_stem(self):
draw.draw(clear=True, kind='stem', x=X, y=X)
draw.draw(clear=True, kind='stem', y=X)
def test_errorbar(self):
draw.draw(clear=True, kind='errorbar', x=X, y=X, xerr=X, yerr=X)
draw.draw(clear=True, kind='errorbar', y=X, yerr=X)
def test_boxplot(self):
draw.draw(clear=True, kind='boxplot', x=X)
def test_barplot(self):
draw.draw(clear=True, kind='barplot', x=X, y=X, width=1)
draw.draw(clear=True, kind='barplot', x=X, y=X)
draw.draw(clear=True, kind='barplot', y=X)
def test_contour(self):
draw.draw(clear=True, kind='contour', z=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
def test_hist(self):
draw.draw(clear=True, kind='hist', x=X, bins=2)
draw.draw(clear=True, kind='hist', x=X)
| gpl-2.0 |
jmetzen/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/table.py | 2 | 17111 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning, the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at the bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <jng@europe.renre.com>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division, print_function
import warnings
import artist
from artist import Artist, allow_rasterization
from patches import Rectangle
from cbook import is_string_like
from matplotlib import docstring
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently support 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(self.codes.iterkeys())))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.iterkeys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.iterkeys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in self._cells.values()]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
if colLabels is not None:
assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
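# Hedged usage sketch (not part of this module): the factory attaches the
# table to an existing axes, e.g.
#   fig = figure(); ax = fig.add_subplot(111)
#   tbl = table(ax, cellText=[['1', '2'], ['3', '4']],
#               rowLabels=['r0', 'r1'], colLabels=['c0', 'c1'], loc='bottom')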
docstring.interpd.update(Table=artist.kwdoc(Table))
| mit |
sandeepgupta2k4/tensorflow | tensorflow/examples/learn/iris_val_based_early_stopping.py | 62 | 2827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/mpl_toolkits/axisartist/axisline_style.py | 8 | 5277 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle:
class SimpleArrow(FancyArrowPatch):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
arrow_transmuter=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
Extend the path to make a room for drawing arrow.
"""
from matplotlib.bezier import get_cos_sin
x0, y0 = path.vertices[-2]
x1, y1 = path.vertices[-1]
cost, sint = get_cos_sin(x0, y0, x1, y1)
d = mutation_size * 1.
x2, y2 = x1 + cost*d, y1+sint*d
if path.codes is None:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
return _path
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
1) transform the path to the display coordinate.
2) extend the path to make a room for arrow
3) update the path of the FancyArrowPatch.
4) draw
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() #line_mutation_scale()
extended_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extended_path
FancyArrowPatch.draw(self, renderer)
class FilledArrow(SimpleArrow):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
"""
:class:`AxislineStyle` is a container class which defines style classes
for AxisArtists.
An instance of any axisline style class is an callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
When called, this should return a mpl artist with following
methods implemented. ::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base(object):
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initialization.
"""
super(AxislineStyle._Base, self).__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path
(set_path method), return the mpl artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
*size*
size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super(AxislineStyle.SimpleArrow, self).__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0,0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
_style_list["-|>"] = FilledArrow
| mit |
vickyting0910/opengeocoding | 2reinter.py | 1 | 3991 | import pandas as pd
import glob
import time
import numpy as num
inter=sorted(glob.glob('*****.csv'))
w='*****.xlsx'
table1=pd.read_excel(w, '*****', index_col=None, na_values=['NA']).fillna(0)
w='*****.csv'
tab=pd.read_csv(w).fillna(0)
tab.is_copy = False
pd.options.mode.chained_assignment = None
t1=time.time()
for i in range(len(tab)):
if tab["IBR"][i]=='9A' or tab["IBR"][i] == '9B' or tab["IBR"][i] == '09A' or tab["IBR"][i] == '09B':
tab["IBR"][i]='9'
if tab["IBR"][i]=='11A' or tab["IBR"][i] == '11B' or tab["IBR"][i]=='11C' or tab["IBR"][i] == '11D' or tab["IBR"][i]=='36B':
tab["IBR"][i]='11'
if tab["IBR"][i]=='36A' or tab["IBR"][i] == '36B':
tab["IBR"][i]='36'
if tab["IBR"][i]=='13A' or tab["IBR"][i] == '13B' or tab["IBR"][i] == '13C':
tab["IBR"][i]='13'
if tab["IBR"][i]=='23A' or tab["IBR"][i] == '23B' or tab["IBR"][i] == '23E' or tab["IBR"][i] == '23F' or tab["IBR"][i] == '23H':
tab["IBR"][i]='23'
if tab["IBR"][i]=='26A' or tab["IBR"][i] == '26B' or tab["IBR"][i] == '26C' or tab["IBR"][i] == '26D' or tab["IBR"][i] == '26E':
tab["IBR"][i]='26'
if tab["IBR"][i]=='35A' or tab["IBR"][i] == '35B':
tab["IBR"][i]='35'
if tab["IBR"][i]=='36A':
tab["IBR"][i]='36'
if tab["IBR"][i]=='39A' or tab["IBR"][i] == '39B' or tab["IBR"][i] == '39C' or tab["IBR"][i] == '39D':
tab["IBR"][i]='39'
if tab["IBR"][i]=='40A' or tab["IBR"][i] == '40B' or tab["IBR"][i] == '40C':
tab["IBR"][i]='40'
if tab["IBR"][i]=='64A' or tab["IBR"][i] == '64B':
tab["IBR"][i]='64'
if tab["IBR"][i]=='90A' or tab["IBR"][i] == '90B' or tab["IBR"][i] == '90C' or tab["IBR"][i] == '90H' or tab["IBR"][i] == '90F' or tab["IBR"][i] == '90G' or tab["IBR"][i]=='90J' or tab["IBR"][i]=='90Z':
tab["IBR"][i]='90'
#convert to string for the join
for i in range(len(table1)):
table1['IBR_code'][i]=str(table1['IBR_code'][i])
description=table1.set_index([ "IBR_code"])
t2=time.time()
print t2-t1
#index crime
tab["index"]=num.nan
for i in range(len(tab)): # store a 1-based row index
tab["index"][i]=tab.index[i]+1
#join
tab=tab.join(description, on=["IBR"], sort=True, rsuffix='_1', how='outer').fillna(0)
tab=tab[(tab["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
tab["IBR_description"]=tab["crime_des12"]
t3=time.time()
print t3-t2
tab=tab[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
tab=tab.replace("",num.nan)
tab=tab.replace("0",num.nan)
tab=tab.replace("00",num.nan)
tab=tab.replace(0,num.nan)
tab.to_csv('*****.csv',index=False)
for i in range(len(tab)):
tab['Global_ID'][i]=str(tab['Global_ID'][i])
description=tab.set_index([ "Global_ID"])
name1=[i[i.find('inter'):i.rfind('C.csv')+1].replace('_matchgeo','') for i in inter]
for p, q in zip((inter), (name1)):
table1=pd.read_csv(p)
for i in range(len(table1)):
table1['Global_ID'][i]=str(table1['Global_ID'][i])
table1=table1.join(description, on=["Global_ID"], sort=True, rsuffix='_1', how='outer').fillna(0)
table1=table1[(table1["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
table1["IBR_description"]=table1["IBR_description_1"]
table1["IBR"]=table1["IBR_1"]
table1=table1[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
table1.to_csv('*****.csv',index=False)
| bsd-2-clause |
jjhelmus/scipy | scipy/signal/filter_design.py | 14 | 135076 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute

EPSILON = 2e-16  # needed by group_delay's singularity check below
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e. zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=None):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
(jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
H(w) = k * ----------------------------------------
(jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
w = findfreqs(z, p, 200, kind='zp')
elif isinstance(worN, int):
N = worN
w = findfreqs(z, p, N, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
response::
:math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 0.2, output='zpk')
>>> w, h = signal.freqz_zpk(z, p, k)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
    zw = exp(1j * w)  # points on the upper unit circle, z = e^{jw}
    h = k * polyvalfromroots(zw, z) / polyvalfromroots(zw, p)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of the continuous (unwrapped)
phase::
    D(w) = - d/dw arg H(e^{jw})
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
A similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at the corresponding frequencies is undefined.
When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
    H(z) = B0(z)/A0(z) * B1(z)/A1(z) * ... * B{n-1}(z)/A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
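# A minimal sketch, not part of the original module (the `_demo_tf2zpk` name
# is added here purely for illustration): factor the first-order system
# H(z) = (z - 0.5) / (z - 0.25) into zeros, poles, and gain.
def _demo_tf2zpk():
    z, p, k = tf2zpk([1.0, -0.5], [1.0, -0.25])
    # Expected: z ~ [0.5], p ~ [0.25], k == 1.0
    return z, p, k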
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
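# A minimal sketch, illustrative only (`_demo_zpk2tf` is a hypothetical name,
# not part of the module): conjugate zero and pole pairs produce purely real
# polynomial coefficients, as handled by the real-output logic above.
def _demo_zpk2tf():
    import numpy as np
    z = [1j, -1j]
    p = [0.9 * np.exp(0.2j), 0.9 * np.exp(-0.2j)]
    b, a = zpk2tf(z, p, 2.0)
    # Expected: b ~ [2., 0., 2.] (i.e. 2*(z**2 + 1)); a is real as well
    return b, a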
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
Converting from TF to SOS format is generally discouraged, since doing
so usually will not reduce numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
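# A minimal sketch, illustrative only (`_demo_tf2sos` is a hypothetical name):
# the TF -> SOS path via tf2zpk, shown on a Butterworth design. As the Notes
# advise, designing directly in ZPK or SOS form is preferable; this only
# demonstrates the conversion.
def _demo_tf2sos():
    b, a = butter(4, 0.2, output='ba')
    sos = tf2sos(b, a)
    # Expected: sos.shape == (2, 6), two biquad sections for a 4th-order filter
    return sos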
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
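# A minimal sketch, illustrative only (`_demo_sos2tf` is a hypothetical name):
# sos2tf multiplies the section polynomials back together, and the result
# should match the direct 'ba' design to within floating-point round-off.
def _demo_sos2tf():
    import numpy as np
    sos = butter(4, 0.2, output='sos')
    b, a = sos2tf(sos)
    b_ref, a_ref = butter(4, 0.2, output='ba')
    return np.allclose(b, b_ref) and np.allclose(a, a_ref)  # expected: True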
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
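# A minimal sketch, illustrative only (`_demo_sos2zpk` is a hypothetical
# name): each section always contributes two roots, so zeros and poles at
# the origin appear for sections whose polynomials are really first order.
def _demo_sos2zpk():
    z, p, k = sos2zpk([[1.0, 0.0, 0.0, 1.0, -0.5, 0.0]])
    # Expected: z ~ [0, 0], p ~ [0.5, 0], k == 1.0
    return z, p, k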
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
"""
try:
        # The statement can throw a ValueError if one
        # of the numerators is a scalar and another
        # is array-like, e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
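# A minimal sketch, illustrative only (`_demo_align_nums` is a hypothetical
# name): mixed scalar/array numerators are right-aligned and zero-padded on
# the left to a common width.
def _demo_align_nums():
    aligned = _demo = _align_nums([5, [1, 2, 3]])
    # Expected: [[0., 0., 5.],
    #            [1., 2., 3.]]
    return aligned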
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
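# A minimal sketch, illustrative only (`_demo_normalize` is a hypothetical
# name): normalize scales both polynomials so the denominator is monic,
# e.g. (2s + 4)/(2s**2 + 6s + 8) -> (s + 2)/(s**2 + 3s + 4).
def _demo_normalize():
    num, den = normalize([2.0, 4.0], [2.0, 6.0, 8.0])
    # Expected: num ~ [1., 2.], den ~ [1., 3., 4.]
    return num, den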
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
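# A minimal sketch, illustrative only (`_demo_lp2lp` is a hypothetical name):
# scale the unity-cutoff prototype H(s) = 1/(s + 1) to a 10 rad/s cutoff,
# giving H(s) = 10/(s + 10).
def _demo_lp2lp():
    b, a = lp2lp([1.0], [1.0, 1.0], wo=10.0)
    # Expected: b ~ [10.], a ~ [1., 10.]
    return b, a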
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
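# A minimal sketch, illustrative only (`_demo_lp2bp` is a hypothetical name):
# the band-pass transform doubles the order; 1/(s + 1) becomes
# bw*s / (s**2 + bw*s + wo**2).
def _demo_lp2bp():
    b, a = lp2bp([1.0], [1.0, 1.0], wo=5.0, bw=2.0)
    # Expected: b ~ [2., 0.], a ~ [1., 2., 25.]
    return b, a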
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
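# A minimal sketch, illustrative only (`_demo_bilinear` is a hypothetical
# name): discretize H(s) = 1/(s + 1) with Tustin's substitution at fs = 1,
# i.e. s -> 2*(z - 1)/(z + 1).
def _demo_bilinear():
    bz, az = bilinear([1.0], [1.0, 1.0], fs=1.0)
    # Expected: bz ~ [1/3, 1/3], az ~ [1., -1/3]
    return bz, az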
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
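# A minimal sketch, illustrative only (`_demo_iirdesign` is a hypothetical
# name): a minimum-order elliptic lowpass with pass edge 0.2, stop edge 0.3,
# at most 1 dB passband ripple, and at least 40 dB stopband attenuation.
def _demo_iirdesign():
    b, a = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40)
    return b, a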
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
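# A minimal sketch, illustrative only (`_demo_zpkbilinear` is a hypothetical
# name): the s-plane pole of H(s) = 1/(s + 1) at -1 maps to
# (2*fs + s)/(2*fs - s) = 1/3 for fs = 1, and the zero from infinity lands
# at z = -1 (Nyquist).
def _demo_zpkbilinear():
    z, p, k = _zpkbilinear([], [-1.0], 1.0, fs=1.0)
    # Expected: z ~ [-1.], p ~ [1/3], k ~ 1/3
    return z, p, k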
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
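# A minimal sketch, illustrative only (`_demo_zpklp2hp` is a hypothetical
# name): applying s -> wo/s to the prototype pole at -1 gives a pole at -wo,
# and the zero from infinity moves to the origin, so H(s) = s/(s + 10) for
# wo = 10.
def _demo_zpklp2hp():
    z, p, k = _zpklp2hp([], [-1.0], 1.0, wo=10.0)
    # Expected: z ~ [0.], p ~ [-10.], k == 1.0
    return z, p, k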
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
    # Placeholder: not yet implemented.
    pass
def yulewalk():
    # Placeholder: not yet implemented.
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
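    Examples
    --------
    A minimal, illustrative evaluation; the edge frequencies below are
    arbitrary values chosen for this sketch, not recommended settings:

    >>> import numpy as np
    >>> passb = np.array([1.0, 2.0])   # fixed passband edges (rad/s)
    >>> stopb = np.array([1.2, 1.8])   # fixed stopband edges (rad/s)
    >>> n = band_stop_obj(1.05, 0, passb, stopb, gpass=3, gstop=40,
    ...                   type='butter')
    >>> bool(n > 0)
    True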
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
        The Chebyshev natural frequency (the passband edge) for use with
        `cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
        The Chebyshev natural frequency for use with `cheby2` to give
        filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
        The elliptic natural frequency (the passband edge) for use with
        `ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
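    Examples
    --------
    An illustrative check (added as a sketch, not part of the original
    docstring): the order-4 prototype has no zeros and four unit-circle
    poles in the left half-plane.

    >>> import numpy as np
    >>> z, p, k = buttap(4)
    >>> len(z), len(p), k
    (0, 4, 1)
    >>> bool(np.allclose(np.abs(p), 1)) and bool(np.all(p.real < 0))
    True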
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
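    Examples
    --------
    An illustrative property check (a sketch, not part of the original
    docstring): odd-order Type I prototypes have unity gain at DC.

    >>> import numpy as np
    >>> z, p, k = cheb1ap(3, rp=1)
    >>> bool(np.allclose(abs(k / np.prod(-p)), 1))
    True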
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
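    Examples
    --------
    An illustrative property check (a sketch; the rs value is arbitrary):
    at the normalized cutoff s = 1j the gain magnitude is 10**(-rs/20).

    >>> import numpy as np
    >>> z, p, k = cheb2ap(4, rs=40)
    >>> H = k * np.prod(1j - z) / np.prod(1j - p)
    >>> bool(np.allclose(abs(H), 10 ** (-40 / 20.)))
    True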
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
    # Objective used by `ellipap`: distance between 1/eps and the elliptic
    # ratio sn(u|mp)/cn(u|mp), minimized to locate the parameter v0.
    [s, c, d, phi] = special.ellipj(u, mp)
    ret = abs(ineps - s / c)
    return ret
def _kratio(m, k_ratio):
    # Objective used by `ellipap`: |K(m)/K(1-m) - k_ratio|, with m clipped
    # to [0, 1]; the degenerate endpoints are special-cased to steer the
    # optimizer away from them.
    m = float(m)
    if m < 0:
        m = 0.0
    if m > 1:
        m = 1.0
    if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
        k = special.ellipk([m, 1 - m])
        r = k[0] / k[1] - k_ratio
    elif abs(m) > EPSILON:
        r = -k_ratio
    else:
        r = 1e20
    return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
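    Examples
    --------
    An illustrative property check (a sketch; rp and rs are arbitrary):
    odd-order elliptic prototypes have unity gain at DC.

    >>> import numpy as np
    >>> z, p, k = ellipap(3, rp=1, rs=40)
    >>> H0 = k * np.prod(-z) / np.prod(-p)
    >>> bool(np.allclose(abs(H0), 1))
    True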
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
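    Examples
    --------
    Illustrative values (added as a sketch):

    >>> _falling_factorial(6, 3)   # 6 * 5 * 4
    120
    >>> from math import factorial
    >>> _falling_factorial(6, 6) == factorial(6)
    True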
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
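    Examples
    --------
    A minimal, illustrative run on a cubic with known roots 1, 2 and 3;
    the starting guesses are deliberately perturbed off the true roots.

    >>> import numpy as np
    >>> f = lambda x: (x - 1) * (x - 2) * (x - 3)
    >>> fp = lambda x: 3 * x ** 2 - 12 * x + 11
    >>> roots = _aberth(f, fp, [0.9 + 0.1j, 2.2 - 0.1j, 3.1 + 0.05j])
    >>> bool(np.allclose(np.sort(roots.real), [1, 2, 3]))
    True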
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
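    Examples
    --------
    An illustrative property check (a sketch): the zeros sum to -1, a
    property that is also asserted internally below.

    >>> import numpy as np
    >>> x = _bessel_zeros(4)
    >>> bool(np.allclose(np.sum(x), -1))
    True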
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
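    Examples
    --------
    A minimal, illustrative check (the frequency and Q are arbitrary): the
    notch design has essentially zero gain at the notch frequency itself.

    >>> import numpy as np
    >>> b, a = _design_notch_peak_filter(0.5, 30, "notch")
    >>> zc = np.exp(1j * 0.5 * np.pi)   # point on the unit circle at w0
    >>> bool(abs(np.polyval(b, zc) / np.polyval(a, zc)) < 1e-12)
    True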
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
    # Check that w0 is within the open interval (0, 1), per the docstring
    if w0 >= 1.0 or w0 <= 0.0:
        raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
    # Compute -3 dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
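    Examples
    --------
    A toy 2x2 illustration (values arbitrary, added as a sketch):

    >>> import numpy as np
    >>> emp_cov = np.array([[1.0, 0.3], [0.3, 1.0]])
    >>> float(alpha_max(emp_cov))
    0.3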
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: this is needed,
    # as in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends
    # to make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
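    Examples
    --------
    A minimal, illustrative fit on random data (the shapes and alpha are
    arbitrary choices for this sketch):

    >>> import numpy as np
    >>> from sklearn.covariance import GraphLasso
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 5)
    >>> model = GraphLasso(alpha=0.05).fit(X)
    >>> model.covariance_.shape
    (5, 5)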
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
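    Examples
    --------
    A minimal sketch (synthetic data; the alpha values are illustrative)::
        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> X = rng.randn(40, 4)
        >>> covs, precs = graph_lasso_path(X, alphas=[0.2, 0.1, 0.05])
        >>> len(covs), len(precs)
        (3, 3)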
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
    n_refinements : strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.
    cv : cross-validation generator, optional
        See sklearn.cross_validation module. If None is passed, defaults to
        a 3-fold strategy.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, optional
        Maximum number of iterations.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    verbose : boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.
    assume_centered : boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray, shape (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
    One of the challenges faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
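    Examples
    --------
    A minimal usage sketch (random data for illustration only)::
        >>> import numpy as np
        >>> from sklearn.covariance import GraphLassoCV
        >>> rng = np.random.RandomState(0)
        >>> X = rng.randn(60, 5)
        >>> model = GraphLassoCV().fit(X)
        >>> model.precision_.shape
        (5, 5)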
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
| bsd-3-clause |
clairetang6/bokeh | bokeh/charts/builders/bar_builder.py | 5 | 12416 | """This is the Bokeh charts interface. It gives you a high-level API to build
complex plots in a simple way.
This is the Bar class, which lets you build your Bar charts by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
from ..builder import Builder, create_and_build
from ...models import FactorRange, Range1d
from ..glyphs import BarGlyph
from ...core.properties import Float, Enum, Bool, Override
from ..properties import Dimension
from ..attributes import ColorAttr, CatAttr
from ..operations import Stack, Dodge
from ...core.enums import Aggregation
from ..stats import stats
from ...models.sources import ColumnDataSource
from ..utils import help
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
class BarBuilder(Builder):
"""This is the Bar builder and it is in charge of plotting
Bar chart (grouped and stacked) in an easy and intuitive way.
Essentially, it utilizes a standardized way to ingest the data,
make the proper calculations and generate renderers. The renderers
reference the transformed data, which represent the groups of data
that were derived from the inputs. We additionally make calculations
for the ranges.
The x_range is categorical, and is made either from the label argument
or from the `pandas.DataFrame.index`. The y_range can be supplied as the
parameter continuous_range, or will be calculated as a linear range
(Range1d) based on the supplied values.
    The bar builder can be further used as a base class for other
builders that might also be performing some aggregation across
derived groups of data.
"""
# ToDo: add label back as a discrete dimension
values = Dimension('values')
dimensions = ['values']
# req_dimensions = [['values']]
default_attributes = {'label': CatAttr(),
'color': ColorAttr(),
'line_color': ColorAttr(default='white'),
'stack': CatAttr(),
'group': CatAttr()}
agg = Enum(Aggregation, default='sum')
max_height = Float(1.0)
min_height = Float(0.0)
bar_width = Float(default=0.8)
fill_alpha = Float(default=0.8)
glyph = BarGlyph
comp_glyph_types = Override(default=[BarGlyph])
label_attributes = ['stack', 'group']
label_only = Bool(False)
values_only = Bool(False)
_perform_stack = False
_perform_group = False
def setup(self):
if self.attributes['color'].columns is None:
if self.attributes['stack'].columns is not None:
self.attributes['color'].setup(columns=self.attributes['stack'].columns)
if self.attributes['group'].columns is not None:
self.attributes['color'].setup(columns=self.attributes['group'].columns)
if self.attributes['stack'].columns is not None:
self._perform_stack = True
if self.attributes['group'].columns is not None:
self._perform_group = True
# ToDo: perform aggregation validation
# Not given values kw, so using only categorical data
if self.values.dtype.name == 'object' and len(self.attribute_columns) == 0:
# agg must be count
self.agg = 'count'
self.attributes['label'].set_columns(self.values.selection)
else:
pass
self._apply_inferred_index()
if self.xlabel is None:
if self.attributes['label'].columns is not None:
self.xlabel = str(
', '.join(self.attributes['label'].columns).title()).title()
else:
self.xlabel = self.values.selection
if self.ylabel is None:
if not self.label_only:
self.ylabel = '%s( %s )' % (
self.agg.title(), str(self.values.selection).title())
else:
self.ylabel = '%s( %s )' % (
self.agg.title(), ', '.join(self.attributes['label'].columns).title())
def _apply_inferred_index(self):
"""Configure chart when labels are provided as index instead of as kwarg."""
# try to infer grouping vs stacking labels
if (self.attributes['label'].columns is None and
self.values.selection is not None):
if self.attributes['stack'].columns is not None:
special_column = 'unity'
else:
special_column = 'index'
self._data['label'] = special_column
self.attributes['label'].setup(data=ColumnDataSource(self._data.df),
columns=special_column)
self.xlabel = ''
def set_ranges(self):
"""Push the Bar data into the ColumnDataSource and calculate
the proper ranges.
"""
x_items = self.attributes['label'].items
if x_items is None:
x_items = ''
x_labels = []
# Items are identified by tuples. If the tuple has a single value,
# we unpack it
for item in x_items:
item = self._get_label(item)
x_labels.append(str(item))
self.x_range = FactorRange(factors=x_labels)
y_shift = abs(0.1 * ((self.min_height + self.max_height) / 2))
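        # pad the continuous range by 10% of the mid-height; e.g. with
        # min_height=0 and max_height=10, y_shift is 0.5 and the y_range
        # computed below becomes (0.0, 10.5)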
if self.min_height < 0:
start = self.min_height - y_shift
else:
start = 0.0
if self.max_height > 0:
end = self.max_height + y_shift
else:
end = 0.0
self.y_range = Range1d(start=start, end=end)
def get_extra_args(self):
if self.__class__ is not BarBuilder:
attrs = self.properties(with_bases=False)
return {attr: getattr(self, attr) for attr in attrs}
else:
return {}
def yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the ColumnDataSource.
"""
kwargs = self.get_extra_args()
attrs = self.collect_attr_kwargs()
for group in self._data.groupby(**self.attributes):
glyph_kwargs = self.get_group_kwargs(group, attrs)
group_kwargs = kwargs.copy()
group_kwargs.update(glyph_kwargs)
props = self.glyph.properties().difference(set(['label']))
# make sure we always pass the color and line color
for k in ['color', 'line_color']:
group_kwargs[k] = group[k]
# TODO(fpliger): we shouldn't need to do this to ensure we don't
# have extra kwargs... this is needed now because
# of label, group and stack being "special"
for k in set(group_kwargs):
if k not in props:
group_kwargs.pop(k)
bg = self.glyph(label=group.label,
x_label=self._get_label(group['label']),
values=group.data[self.values.selection].values,
agg=stats[self.agg](),
width=self.bar_width,
fill_alpha=self.fill_alpha,
stack_label=self._get_label(group['stack']),
dodge_label=self._get_label(group['group']),
**group_kwargs)
self.add_glyph(group, bg)
if self._perform_stack:
Stack().apply(self.comp_glyphs)
if self._perform_group:
Dodge().apply(self.comp_glyphs)
        # a higher-level responsibility of the bar chart is to keep track of the max height across all bars
self.max_height = max([renderer.y_max for renderer in self.comp_glyphs])
self.min_height = min([renderer.y_min for renderer in self.comp_glyphs])
for renderer in self.comp_glyphs:
for sub_renderer in renderer.renderers:
yield sub_renderer
@help(BarBuilder)
def Bar(data, label=None, values=None, color=None, stack=None, group=None, agg="sum",
xscale="categorical", yscale="linear", xgrid=False, ygrid=True,
continuous_range=None, **kw):
""" Create a Bar chart using :class:`BarBuilder <bokeh.charts.builders.bar_builder.BarBuilder>`
    to render the geometry from values, cat and stacked.
Args:
data (:ref:`userguide_charts_data_types`): the data
source for the chart.
label (list(str) or str, optional): list of string representing the categories.
(Defaults to None)
values (str, optional): iterable 2d representing the data series
values matrix.
color (str or list(str) or `~bokeh.charts._attributes.ColorAttr`): string color,
string column name, list of string columns or a custom `ColorAttr`,
which replaces the default `ColorAttr` for the builder.
        stack (list(str) or str, optional): columns to use for stacking.
            (Defaults to None, so grouping is assumed)
group (list(str) or str, optional): columns to use for grouping.
        agg (str): how to aggregate the `values`. (Defaults to 'sum'; if only
            label is provided, then performs a `count`)
continuous_range(Range1d, optional): Custom continuous_range to be
used. (Defaults to None)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
Returns:
:class:`Chart`: includes glyph renderers that generate bars
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Bar, output_file, show, hplot
# best support is with data in a format that is table-like
data = {
'sample': ['1st', '2nd', '1st', '2nd', '1st', '2nd'],
'interpreter': ['python', 'python', 'pypy', 'pypy', 'jython', 'jython'],
'timing': [-2, 5, 12, 40, 22, 30]
}
# x-axis labels pulled from the interpreter column, stacking labels from sample column
bar = Bar(data, values='timing', label='interpreter', stack='sample', agg='mean',
title="Python Interpreter Sampling", legend='top_right', plot_width=400)
# table-like data results in reconfiguration of the chart with no data manipulation
bar2 = Bar(data, values='timing', label=['interpreter', 'sample'],
agg='mean', title="Python Interpreters", plot_width=400)
output_file("stacked_bar.html")
show(hplot(bar, bar2))
"""
if continuous_range and not isinstance(continuous_range, Range1d):
raise ValueError(
"continuous_range must be an instance of bokeh.models.ranges.Range1d"
)
if label is not None and values is None:
kw['label_only'] = True
if (agg == 'sum') or (agg == 'mean'):
agg = 'count'
values = label
# The continuous_range is the y_range (until we implement HBar charts)
y_range = continuous_range
kw['label'] = label
kw['values'] = values
kw['color'] = color
kw['stack'] = stack
kw['group'] = group
kw['agg'] = agg
kw['xscale'] = xscale
kw['yscale'] = yscale
kw['xgrid'] = xgrid
kw['ygrid'] = ygrid
kw['y_range'] = y_range
chart = create_and_build(BarBuilder, data, **kw)
# hide x labels if there is a single value, implying stacking only
if len(chart.x_range.factors) == 1 and not label:
chart.below[0].visible = False
return chart
| bsd-3-clause |
imaculate/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
waynenilsen/statsmodels | examples/python/robust_models_0.py | 33 | 2992 |
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
LaRiffle/axa_challenge | fonction_py/train.py | 1 | 12400 | from fonction_py.tools import *
from fonction_py.preprocess import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
from fonction_py.tim import *
import time
def faireTout():
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS']  # columns to read
    listmodel = faireListModel()  # get the name and the estimator for each ASS_ASSIGNMENT
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # read the training file
    resultat = pd.read_csv("data/submission.txt", sep="\t")  # read the test file
    for model in listmodel:
        print(model[0])  # display the ASS_ASSIGNMENT name
        x, y = preprocess(data.copy(), model[0])  # add the features
        model[1].fit(x, y)  # train
        (xTest, souvenir) = preprocessFINAL(x, model[0])
        pred = model[1].predict(xTest)  # predict
        pred[pred > max(y) * 1.05] = max(y) * 1.05  # avoid predicting too high
        pred[pred < 0] = 0  # no negative values
        pred = np.round(pred).astype(int)
        # souvenir keeps the standard format and the date so that every
        # prediction can be put back in the right place at the end
        souvenir['prediction'] = pred
        # put each prediction back on the right row; the merge creates
        # prediction_x (previous) and prediction_y (new) columns
        resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
        resultat = resultat.fillna(0)  # rows without a prediction get 0
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']  # merge the two columns
        del resultat['prediction_x']
        del resultat['prediction_y']
    # 'Téléphonie' is handled separately with its dedicated predictor
    x, y = preprocess(data.copy(), 'Téléphonie')  # add the features
    (xTest, souvenir) = preprocessFINAL(x, 'Téléphonie')
    pred = telephoniePred(x, y, xTest)
    pred[pred > max(y) * 1.05] = max(y) * 1.05
    pred[pred < 0] = 0
    pred = np.round(pred).astype(int)
    souvenir['prediction'] = pred
    resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
    resultat = resultat.fillna(0)
    resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
    del resultat['prediction_x']
    del resultat['prediction_y']
    resultat['prediction'] = resultat['prediction'].astype(int)
    resultat.to_csv("pouranalyse.txt", sep="\t", index=False, encoding='utf-8')
return resultat
def faireListModel():
return [('CAT', linear_model.LinearRegression()),
('CMS', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=5,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Crises',linear_model.LinearRegression()),
('Domicile', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=90, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion - Accueil Telephonique',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=70, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Assurances',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=20, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Clients', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=50, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion DZ', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=5,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Relation Clienteles',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=110, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Renault', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features=50, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Japon',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=10,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Manager',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Mécanicien',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Médical',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Nuit', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Prestataires',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('RENAULT',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=80,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('RTC',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Regulation Medicale',linear_model.LinearRegression()),
('SAP',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Services',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Axa',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Inter',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Total',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=70,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Téléphonie',GradientBoostingRegressor(loss='huber', alpha=0.9,n_estimators=100, max_depth=3,learning_rate=.1, min_samples_leaf=9,min_samples_split=9) )] | mit |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/skimage/viewer/utils/core.py | 19 | 6555 | import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from ..._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
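# Example (hypothetical): reading a RequiredAttr that was never assigned
# raises AttributeError('Required attribute not set'), e.g.:
#     class Plugin(object):
#         viewer = RequiredAttr()
#     Plugin.viewer  # -> AttributeError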
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
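    Examples
    --------
    A hypothetical map fading linearly from black to pure red (the values
    below are illustrative only)::
        >>> cmap = LinearColormap('black_red',
        ...                       {'red':   [(0.0, 0.0), (1.0, 1.0)],
        ...                        'green': [(0.0, 0.0), (1.0, 0.0)],
        ...                        'blue':  [(0.0, 0.0), (1.0, 0.0)]})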
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
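    Examples
    --------
    A minimal sketch (random image data; needs a working Qt backend, so the
    doctest is skipped)::
        >>> import numpy as np
        >>> fig, ax = figimage(np.random.rand(64, 64))  # doctest: +SKIP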
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
| mit |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_maths.py | 1 | 20433 | #!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
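# Example invocation (file names are hypothetical, for illustration only):
#   sct_maths -i data.nii.gz -o data_mean.nii.gz -mean t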
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
'several 3d images separated with ","'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
help='Use following number to threshold image (zero below number).',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
        help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate parameters with ",". Example: p=1,b=3\n'
' p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
' b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5 '
' Note, block radius must be smaller than the smaller image dimension: default value is lowered for small images)\n'
'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
        help='Compute the cross correlation (CC) between both input files (-i and -corr).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -laplacian needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -smooth needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -corr
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
# check that all images have the same shape
for i in range(1, len(nii)):
if not np.shape(nii[i].data) == np.shape(data0):
printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
printv('\nERROR: All input images must have same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
:param argument: list of file names or scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
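# Example (hypothetical): a scalar argument is broadcast to the shape of the
# input volume, so later arithmetic is applied voxel-wise:
#   >>> vol = np.ones((2, 2, 2))
#   >>> get_data_or_scalar(['10'], vol)[0, 0, 0]
#   10.0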
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
pickle.dump(res, open(fname_out, 'wb'), protocol=2)
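# Example (hypothetical filename): reading back a result saved by
# compute_similarity as a gzipped pickle:
#   >>> import pickle, gzip
#   >>> res = pickle.load(gzip.open('similarity.pklz', 'rb'))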
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| mit |
pompiduskus/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
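# A possible extension (hypothetical, mirroring the commented-out plotting
# block above): persist the fitted pipeline for reuse with joblib.
#from sklearn.externals import joblib
#joblib.dump(clf, 'language_detector.pkl')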
| bsd-3-clause |
andrewgiessel/folium | folium/utilities.py | 1 | 19979 | # -*- coding: utf-8 -*-
"""
Utilities
---------
Utility module for Folium helper functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import math
import zlib
import struct
import json
import base64
from jinja2 import Environment, PackageLoader
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from folium.six import iteritems, text_type, binary_type
def get_templates():
"""Get Jinja templates."""
return Environment(loader=PackageLoader('folium', 'templates'))
def legend_scaler(legend_values, max_labels=10.0):
"""
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
"""
if len(legend_values) < max_labels:
legend_ticks = legend_values
else:
spacer = int(math.ceil(len(legend_values)/max_labels))
legend_ticks = []
for i in legend_values[::spacer]:
legend_ticks += [i]
legend_ticks += ['']*(spacer-1)
return legend_ticks
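# Example: a 4-entry legend thinned so only every 2nd tick keeps its label:
#   >>> legend_scaler([0, 1, 2, 3], max_labels=2.0)
#   [0, '', 2, '']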
def linear_gradient(hexList, nColors):
"""
Given a list of hexcode values, will return a list of length
nColors where the colors are linearly interpolated between the
(r, g, b) tuples that are given.
Example:
linear_gradient(['#000000', '#FF0000', '#FFFF00'], 100)
"""
def _scale(start, finish, length, i):
"""
Return the correct value of a number that is in between start
and finish, for use in a loop of length *length*.
"""
base = 16
fraction = float(i) / (length - 1)
raynge = int(finish, base) - int(start, base)
thex = hex(int(int(start, base) + fraction * raynge)).split('x')[-1]
if len(thex) != 2:
thex = '0' + thex
return thex
allColors = []
# Separate (R, G, B) pairs.
for start, end in zip(hexList[:-1], hexList[1:]):
# Linearly interpolate between pair of hex ###### values and
# add to list.
nInterpolate = 765
for index in range(nInterpolate):
r = _scale(start[1:3], end[1:3], nInterpolate, index)
g = _scale(start[3:5], end[3:5], nInterpolate, index)
b = _scale(start[5:7], end[5:7], nInterpolate, index)
allColors.append(''.join(['#', r, g, b]))
# Pick only nColors colors from the total list.
result = []
for counter in range(nColors):
fraction = float(counter) / (nColors - 1)
index = int(fraction * (len(allColors) - 1))
result.append(allColors[index])
return result
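# Example: five colors interpolated between black and red:
#   >>> linear_gradient(['#000000', '#FF0000'], 5)
#   ['#000000', '#3f0000', '#7f0000', '#bf0000', '#ff0000']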
def color_brewer(color_code, n=6):
"""
Generate a ColorBrewer color scheme named 'color_code' with 'n' colors.
Live examples can be seen at http://colorbrewer2.org/
"""
maximum_n = 253
scheme_info = {'BuGn': 'Sequential',
'BuPu': 'Sequential',
'GnBu': 'Sequential',
'OrRd': 'Sequential',
'PuBu': 'Sequential',
'PuBuGn': 'Sequential',
'PuRd': 'Sequential',
'RdPu': 'Sequential',
'YlGn': 'Sequential',
'YlGnBu': 'Sequential',
'YlOrBr': 'Sequential',
'YlOrRd': 'Sequential',
'BrBg': 'Diverging',
'PiYG': 'Diverging',
'PRGn': 'Diverging',
'PuOr': 'Diverging',
'RdBu': 'Diverging',
'RdGy': 'Diverging',
'RdYlBu': 'Diverging',
'RdYlGn': 'Diverging',
'Spectral': 'Diverging',
'Accent': 'Qualitative',
'Dark2': 'Qualitative',
'Paired': 'Qualitative',
'Pastel1': 'Qualitative',
'Pastel2': 'Qualitative',
'Set1': 'Qualitative',
'Set2': 'Qualitative',
'Set3': 'Qualitative',
}
schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#99D8C9',
'#66C2A4', '#41AE76', '#238B45', '#005824'],
'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA',
'#8C96C6', '#8C6BB1', '#88419D', '#6E016B'],
'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5',
'#7BCCC4', '#4EB3D3', '#2B8CBE', '#08589E'],
'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84',
'#FC8D59', '#EF6548', '#D7301F', '#990000'],
'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB',
'#74A9CF', '#3690C0', '#0570B0', '#034E7B'],
'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB',
'#67A9CF', '#3690C0', '#02818A', '#016450'],
'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7',
'#DF65B0', '#E7298A', '#CE1256', '#91003F'],
'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5',
'#F768A1', '#DD3497', '#AE017E', '#7A0177'],
'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E',
'#78C679', '#41AB5D', '#238443', '#005A32'],
'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB',
'#41B6C4', '#1D91C0', '#225EA8', '#0C2C84'],
'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F',
'#FE9929', '#EC7014', '#CC4C02', '#8C2D04'],
'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C',
'#FD8D3C', '#FC4E2A', '#E31A1C', '#B10026'],
'BrBg': ['#8c510a', '#d8b365', '#f6e8c3',
'#c7eae5', '#5ab4ac', '#01665e'],
'PiYG': ['#c51b7d', '#e9a3c9', '#fde0ef',
'#e6f5d0', '#a1d76a', '#4d9221'],
'PRGn': ['#762a83', '#af8dc3', '#e7d4e8',
'#d9f0d3', '#7fbf7b', '#1b7837'],
'PuOr': ['#b35806', '#f1a340', '#fee0b6',
'#d8daeb', '#998ec3', '#542788'],
'RdBu': ['#b2182b', '#ef8a62', '#fddbc7',
'#d1e5f0', '#67a9cf', '#2166ac'],
'RdGy': ['#b2182b', '#ef8a62', '#fddbc7',
'#e0e0e0', '#999999', '#4d4d4d'],
'RdYlBu': ['#d73027', '#fc8d59', '#fee090',
'#e0f3f8', '#91bfdb', '#4575b4'],
'RdYlGn': ['#d73027', '#fc8d59', '#fee08b',
'#d9ef8b', '#91cf60', '#1a9850'],
'Spectral': ['#d53e4f', '#fc8d59', '#fee08b',
'#e6f598', '#99d594', '#3288bd'],
'Accent': ['#7fc97f', '#beaed4', '#fdc086',
'#ffff99', '#386cb0', '#f0027f'],
'Dark2': ['#1b9e77', '#d95f02', '#7570b3',
'#e7298a', '#66a61e', '#e6ab02'],
'Paired': ['#a6cee3', '#1f78b4', '#b2df8a',
'#33a02c', '#fb9a99', '#e31a1c'],
'Pastel1': ['#fbb4ae', '#b3cde3', '#ccebc5',
'#decbe4', '#fed9a6', '#ffffcc'],
'Pastel2': ['#b3e2cd', '#fdcdac', '#cbd5e8',
'#f4cae4', '#e6f5c9', '#fff2ae'],
'Set1': ['#e41a1c', '#377eb8', '#4daf4a',
'#984ea3', '#ff7f00', '#ffff33'],
'Set2': ['#66c2a5', '#fc8d62', '#8da0cb',
'#e78ac3', '#a6d854', '#ffd92f'],
'Set3': ['#8dd3c7', '#ffffb3', '#bebada',
'#fb8072', '#80b1d3', '#fdb462'],
}
# Raise an error if the n requested is greater than the maximum.
if n > maximum_n:
raise ValueError("The maximum number of colors in a"
" ColorBrewer sequential color series is 253")
# Only if n is greater than six do we interpolate values.
if n > 6:
if color_code not in schemes:
color_scheme = None
else:
# Check to make sure that it is not a qualitative scheme.
if scheme_info[color_code] == 'Qualitative':
raise ValueError("Expanded color support is not available"
" for Qualitative schemes, restrict"
" number of colors to 6")
else:
color_scheme = linear_gradient(schemes.get(color_code), n)
else:
color_scheme = schemes.get(color_code, None)
return color_scheme
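# Example: the six-color RdBu scheme, or a 12-step interpolated BuGn ramp:
#   >>> color_brewer('RdBu')
#   ['#b2182b', '#ef8a62', '#fddbc7', '#d1e5f0', '#67a9cf', '#2166ac']
#   >>> len(color_brewer('BuGn', n=12))
#   12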
def transform_data(data):
"""
Transform Pandas DataFrame into JSON format.
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series
Returns
-------
JSON compatible dict
Example
-------
>>> transform_data(df)
"""
if pd is None:
raise ImportError("The Pandas package is required"
" for this functionality")
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
def type_check(value):
"""
Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
so they must be explicitly converted.
"""
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
if isinstance(data, pd.Series):
json_data = [{type_check(x): type_check(y) for
x, y in iteritems(data)}]
elif isinstance(data, pd.DataFrame):
json_data = [{type_check(y): type_check(z) for
x, y, z in data.itertuples()}]
return json_data
def split_six(series=None):
"""
Given a Pandas Series, get a domain of values from zero to the 90% quantile
rounded to the nearest order-of-magnitude integer. For example, 2100 is
rounded to 2000, 2790 to 3000.
Parameters
----------
series: Pandas series, default None
Returns
-------
list
"""
if pd is None:
raise ImportError("The Pandas package is required"
" for this functionality")
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
def base(x):
if x > 0:
base = pow(10, math.floor(math.log10(x)))
return round(x/base)*base
else:
return 0
quants = [0, 50, 75, 85, 90]
# Some weirdness in series quantiles a la 0.13.
arr = series.values
return [base(np.percentile(arr, x)) for x in quants]
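# Example (hypothetical data, assuming pandas is installed): quantiles of
# 0..999 rounded to one significant figure by base():
#   >>> import pandas as pd
#   >>> split_six(pd.Series(range(1000)))
#   [0, 500.0, 700.0, 800.0, 900.0]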
def mercator_transform(data, lat_bounds, origin='upper', height_out=None):
"""Transforms an image computed in (longitude,latitude) coordinates into
the a Mercator projection image.
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
lat_bounds : length 2 tuple
Minimal and maximal value of the latitude of the image.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
height_out : int, default None
The expected height of the output.
If None, the height of the input is used.
"""
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
mercator = lambda x: np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi
array = np.atleast_3d(data).copy()
height, width, nblayers = array.shape
lat_min, lat_max = lat_bounds
if height_out is None:
height_out = height
# Eventually flip the image
if origin == 'upper':
array = array[::-1, :, :]
lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) *
(lat_max-lat_min))
latslats = (mercator(lat_min) +
np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) *
(mercator(lat_max)-mercator(lat_min)))
out = np.zeros((height_out, width, nblayers))
for i in range(width):
for j in range(nblayers):
out[:, i, j] = np.interp(latslats, mercator(lats), array[:, i, j])
# Eventually flip the image.
if origin == 'upper':
out = out[::-1, :, :]
return out
def image_to_url(image, mercator_project=False, colormap=None,
origin='upper', bounds=((-90, -180), (90, 180))):
"""Infers the type of an image argument and transforms it into a URL.
Parameters
----------
image: string, file or array-like object
* If string, it will be written directly in the output file.
* If file, its content will be encoded and embedded in the
output file.
* If array-like, it will be converted to PNG base64 string and
embedded in the output.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0, 0] index of the array in the upper left or
lower left corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint : you can use colormaps from `matplotlib.cm`.
mercator_project : bool, default False, used for array-like image.
Transforms the data to project (longitude,latitude)
coordinates to the Mercator projection.
bounds: list-like, default ((-90, -180), (90, 180))
Image bounds on the map in the form
[[lat_min, lon_min], [lat_max, lon_max]].
Only used if mercator_project is True.
"""
if hasattr(image, 'read'):
# We got an image file.
if hasattr(image, 'name'):
# We try to get the image format from the file name.
fileformat = image.name.lower().split('.')[-1]
else:
fileformat = 'png'
url = "data:image/{};base64,{}".format(
fileformat, base64.b64encode(image.read()).decode('utf-8'))
elif (not (isinstance(image, text_type) or
isinstance(image, binary_type))) and hasattr(image, '__iter__'):
# We got an array-like object.
if mercator_project:
data = mercator_transform(image,
[bounds[0][0], bounds[1][0]],
origin=origin)
else:
data = image
png = write_png(data, origin=origin, colormap=colormap)
url = "data:image/png;base64," + base64.b64encode(png).decode('utf-8')
else:
# We got an URL.
url = json.loads(json.dumps(image))
return url.replace('\n', ' ')
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> "data:image/png;base64,"+png_str.encode('base64')
Inspired from
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
if colormap is None:
colormap = lambda x: (x, x, x, 1)
array = np.atleast_3d(data)
height, width, nblayers = array.shape
if nblayers not in [1, 3, 4]:
raise ValueError("Data must be NxM (mono), "
"NxMx3 (RGB), or NxMx4 (RGBA)")
assert array.shape == (height, width, nblayers)
if nblayers == 1:
array = np.array(list(map(colormap, array.ravel())))
nblayers = array.shape[1]
if nblayers not in [3, 4]:
raise ValueError("colormap must provide colors of"
"length 3 (RGB) or 4 (RGBA)")
array = array.reshape((height, width, nblayers))
assert array.shape == (height, width, nblayers)
if nblayers == 3:
array = np.concatenate((array, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert array.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if array.dtype != 'uint8':
array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4))
array = array.astype('uint8')
# Eventually flip the image.
if origin == 'lower':
array = array[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + array[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
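# Example (hypothetical): write a 2x2 random RGBA image straight to disk:
#   >>> img = np.random.rand(2, 2, 4)
#   >>> with open('tiny.png', 'wb') as f:
#   ...     f.write(write_png(img))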
def _camelify(out):
return (''.join(["_" + x.lower() if i < len(out)-1 and x.isupper() and out[i+1].islower() # noqa
else x.lower() + "_" if i < len(out)-1 and x.islower() and out[i+1].isupper() # noqa
else x.lower() for i, x in enumerate(list(out))])).lstrip('_').replace('__', '_') # noqa
def _parse_size(value):
try:
if isinstance(value, int) or isinstance(value, float):
value_type = 'px'
value = float(value)
assert value > 0
else:
value_type = '%'
value = float(value.strip('%'))
assert 0 <= value <= 100
except:
msg = "Cannot parse value {!r} as {!r}".format
raise ValueError(msg(value, value_type))
return value, value_type
def _locations_mirror(x):
"""Mirrors the points in a list-of-list-of-...-of-list-of-points.
For example:
>>> _locations_mirror([[[1, 2], [3, 4]], [5, 6], [7, 8]])
[[[2, 1], [4, 3]], [6, 5], [8, 7]]
"""
if hasattr(x, '__iter__'):
if hasattr(x[0], '__iter__'):
return list(map(_locations_mirror, x))
else:
return list(x[::-1])
else:
return x
def _locations_tolist(x):
"""Transforms recursively a list of iterables into a list of list.
"""
if hasattr(x, '__iter__'):
return list(map(_locations_tolist, x))
else:
return x
| mit |
linsalrob/EdwardsLab | phage_protein_blast_genera/tax_violin_plots.py | 1 | 2239 | """
"""
import os
import sys
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py)', default='/home/redwards/Desktop/gav_all_host.out')
parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'}
col = None
colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6}
if args.n not in colkey:
sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys()))))
sys.exit(-1)
col = colkey[args.n]
want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'}
data = {}
with open(args.f, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
if p[2] not in want:
p[2] = 'All phages'
#continue ## comment or uncomment this to include/exclude all data
if p[2] not in data:
data[p[2]] = []
data[p[2]].append(float(p[col]))
labels = sorted(data.keys())
scores = []
count = 1
ticks = []
for l in labels:
scores.append(data[l])
ticks.append(count)
count += 1
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.boxplot(alldata)
vp = ax.violinplot(scores, showmeans=True)
for i, j in enumerate(vp['bodies']):
if i == 0:
j.set_color('gray')
elif i == 1:
j.set_color('sandybrown')
else:
j.set_color('lightpink')
ax.set_xlabel("Body Site")
ax.set_ylabel("Average number of {}".format(ynames[args.n]))
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation='vertical')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.tight_layout()
#plt.show()
fig.savefig("/home/redwards/Desktop/bodysites.png")
| mit |
dav-stott/phd-thesis | spectra_thesis_ais.py | 1 | 70177 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 25 08:48:28 2014
@author: david
"""
#*************** IMPORT DEPENDANCIES*******************************************
import numpy as np
#import spec_gdal4 as spg
from osgeo import gdal
import os
import csv
#import h5py
import datetime
import numpy.ma as ma
#from StringIO import StringIO
#import shapely
#import r2py
from osgeo import gdal_array
from osgeo import gdalconst
from osgeo.gdalconst import *
from osgeo import ogr
from osgeo import osr
from scipy.spatial import ConvexHull
from scipy.signal import find_peaks_cwt
from scipy.signal import savgol_filter
from scipy import interpolate
import matplotlib.pyplot as plt
#from shapely.geometry import LineString
################# Functions ###################################################
'''These are helper functions that are not part of any specific class; they
are used by the data import classes for operations such as smoothing'''
def smoothing(perc_out, block_start, block_end, kparam, weight, sparam):
#slice out the block of the spectrum to be smoothed
sm_spline_block = perc_out[block_start:block_end,:]
sm_x = sm_spline_block[:,0]
sm_y = sm_spline_block[:,1]
sm_len = sm_x.shape
sm_weights = np.zeros(sm_len)+weight
sm_spline = interpolate.UnivariateSpline(sm_x,
sm_y,
k=kparam,
w=sm_weights,
s=sparam)
spline = sm_spline(sm_x)
spline = np.column_stack((sm_x,spline))
return spline
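# Example (hypothetical spectrum): smooth the first 100 samples with a cubic
# spline (k=3), uniform weight 3.5 and smoothing factor 45:
#   >>> spec = np.column_stack((np.arange(350, 2501), np.random.rand(2151)))
#   >>> smoothing(spec, 0, 100, 3, 3.5, 45).shape
#   (100, 2)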
def interpolate_gaps(array1, array2):
array_end = array1.shape[0]-1
array1_endx = array1[array_end, 0]
#get the start point of the second array
array2_start = array2[0,0]
#get the length of the area to be interpolated
x_len = array2_start-array1_endx+1
#generate x values to use for the array
xvals = np.linspace(array1_endx, array2_start, num=int(x_len))
#y val for the start of the interpolated area
yval_array1 = array1[array_end,1]
# y val for the end of interpolated area
yval_array2 = array2[0,1]
#stack the values into a new array
xin = np.append(array1_endx, array2_start)
yin = np.append(yval_array1, yval_array2)
#numpy.interp(x, xp, fp)
gap_filling = np.interp(xvals, xin, yin)
filled_x = np.column_stack((xvals, gap_filling))
print (filled_x.shape)
return filled_x
class absorption_feature():
'''this class is used for the characterisation of spectral absorption
features, and their investigation using continuum removal'''
def __init__(self, spectra, feat_start, feat_end, feat_centre):
self.wl = spectra[:,0]
self.values = spectra[:,1]
print ('CALL TO ABSORPTION FEATURE')
# start of absorption feature
self.feat_start = feat_start
# end of absorption feature
self.feat_end = feat_end
# approximate 'centre' of feature
self.feat_centre = feat_centre
#get the range of the data
self.min_wl = self.wl[0]
self.max_wl = self.wl[-1]
print ('Absorption feature',self.feat_start,self.feat_end)
#define feature name
self.feat_name = str(self.feat_start)+'_'+str(self.feat_end)
'''# if the feature is within the range of the sensor, do stuff
if self.feat_start > self.min_wl and self.feat_end < self.max_wl:
print 'can do stuff with this data'
try:
self.abs_feature()
print ('Absorption feature analysis sussceful')
except:
print ('ERROR analysing absorption feature', self.feat_name)
pass
else:
print ('Cannot define feature: Out of range')'''
########## Methods ##################################################
def abs_feature(self):
print ('Call to abs_feature made')
# Method to calculate the end points of the absorption feature.
# Does this using the Qhull algorithm from scipy.spatial
#use the initial definition of the absorption feature as a starting point
# get the indices for these
cont_rem_stacked = None
ft_def_stacked = None
start_point = np.argmin(np.abs(self.wl-self.feat_start))
end_point = np.argmin(np.abs(self.wl-self.feat_end))
centre = np.argmin(np.abs(self.wl-self.feat_centre))
#find the index minima of reflectance
minima = np.argmin(self.values[start_point:end_point])+start_point
# if the minima = the start point then the start point is the minima
if minima == start_point:
left = minima
#if not then the left side of the feature is the maxima on the left of the minima
elif minima <= centre:
left = start_point+np.argmax(self.values[start_point:centre])
else:
left = start_point+np.argmax(self.values[start_point:minima])
#right is the maxima on the right of the absorption feature
if minima == end_point:
right = minima
else:
right = minima+np.argmax(self.values[minima:end_point])
# use left and right to create a 2D array of points
hull_in = np.column_stack((self.wl[left:right],self.values[left:right]))
#determine the minima of the points
hull_min = minima-left
if hull_min <= 0:
hull_min=0
#find the wavelength at minima
hull_min_wl = hull_in[hull_min,0]
# define the wavelength ranges we'll use to select simplices
ft_left_wl = hull_min_wl-((hull_min_wl-hull_in[0,0])/2)
ft_right_wl = hull_min_wl+((hull_in[-1,0]-hull_min_wl)/2)
#use scipy.spatial convex hull to determine the convex hull of the points
hull = ConvexHull(hull_in)
# get the simplex tuples from the convex hull
simplexes = hull.simplices
# create an empty list to store simplices potentially related to our feature
feat_pos = []
#iterate through the simplices
for simplex in simplexes:
#extract vertices from simplices
vertex1 = simplex[0]
vertex2 = simplex[1]
#print 'VERT!',hull_in[vertex1,0],hull_in[vertex2,0]
''' We're only interested in the upper hull. Qhull moves counter-
clockwise. Therefore we're only interested in those points where
vertex 1 is greater than vertex 2'''
'''The above may be total bollocks'''
if not vertex1 < vertex2:
'''We then use the wavelength ranges to determine which simplices
relate to our absorption feature'''
if hull_in[vertex2,0] <= ft_left_wl and \
hull_in[vertex2,0] >= self.wl[left] and \
hull_in[vertex1,0] >= ft_right_wl and \
hull_in[vertex1,0] <= self.wl[right]:
# append the vertices to the list
print (hull_in[vertex2,0])
print (hull_in[vertex1,0])
feat_pos.append((vertex2,vertex1))
print ('feat_pos length:',len(feat_pos), type(feat_pos))
#print feat_pos[0],feat_pos[1]
else:
continue
'''We only want one feature here. If there's more than one or less
than one we're not interested as we're probably not dealing with
vegetation'''
# If there's less than one feature...
if len(feat_pos) < 1:
print ('Absorption feature cannot be defined:less than one feature')
ft_def_stacked = None
ft_def_hdr = None
cont_rem_stacked = None
elif len(feat_pos) == 1:
feat_pos=feat_pos[0]
print ('£££££',feat_pos, type(feat_pos))
else:
#if there's more than one, find the widest one. this is not optimal.
if len(feat_pos) >1:
feat_width = []
for pair in feat_pos:
feat_width.append(pair[1]-pair[0])
print ('feat width:', feat_width)
#feat_width = np.asarray(feat_width)
print (feat_width)
f_max = feat_width.index(max(feat_width))
print (f_max)
feat_pos = feat_pos[f_max]
print (type(feat_pos))
if feat_pos is not None:
feat_pos = feat_pos[0], feat_pos[1]
print ('DOES MY FEAT_POS CONVERSION WORK?', feat_pos)
print ('Analysing absorption feature')
#slice
feature = hull_in[feat_pos[0]:feat_pos[1],:]
print ('Feature shape',feature.shape,'start:',feature[0,0],'end:',feature[-1,0])
#get the minima in the slice
minima_pos = np.argmin(feature[:,1])
#continuum removal
contrem = self.continuum_removal(feature,minima_pos)
# set up single value outputs
# start of feature
refined_start = feature[0,0]
# end of feature
refined_end = feature[-1,0]
# wavelength at minima
minima_WL = feature[minima_pos,0]
# reflectance at minima
minima_R = feature[minima_pos,1]
# area of absorption feature
feat_area = contrem[4]
# two band normalised index of minima and start of feature
left_tbvi = (refined_start-minima_R)/(refined_start+minima_R)
# two band normalised index of minima and right of feature
right_tbvi = (refined_end-minima_R)/(refined_end+minima_R)
# gradient of the continuum line
cont_gradient = np.mean(np.gradient(contrem[0]))
# area of continuum removed absorption feature
cont_rem_area = contrem[3]
# maxima of continuum removed absorption feature
cont_rem_maxima = np.max(contrem[1])
# wavelength of maxima of continuum removed absorption feature
cont_rem_maxima_wl = feature[np.argmax(contrem[1]),0]
#area of left part of continuum removed feature
cont_area_l = contrem[5]
if cont_area_l is None:
cont_area_l=0
#are aof right part of continuum removed feature
cont_area_r = contrem[6]
#stack these into a lovely array
ft_def_stacked = np.column_stack((refined_start,
refined_end,
minima_WL,
minima_R,
feat_area,
left_tbvi,
right_tbvi,
cont_gradient,
cont_rem_area,
cont_rem_maxima,
cont_rem_maxima_wl,
cont_area_l,
cont_area_r))
ft_def_hdr = str('"Refined start",'+
'"Refined end",'+
'"Minima Wavelenght",'+
'"Minima Reflectance",'+
'"Feature Area",'+
'"Left TBVI",'+
'"Right TBVI",'+
'"Continuum Gradient",'+
'"Continuum Removed Area",'+
'"Continuum Removed Maxima",'+
'"Continuum Removed Maxima WL",'+
'"Continuum Removed Area Left",'+
'"Continuum Removed Area Right",')
#print ft_def_stacked.shape #save the stacked outputs as hdf
# stack the 2d continuum removed outputs
cont_rem_stacked = np.column_stack((feature[:,0],
feature[:,1],
contrem[0],
contrem[1],
contrem[2]))
print ('CREM', cont_rem_stacked.shape)
return ft_def_stacked, ft_def_hdr, cont_rem_stacked
def continuum_removal(self,feature,minima):
#method to perform continuum removal
#pull out endmembers
end_memb = np.vstack((feature[0,:],feature[-1,:]))
#interpolate between the endmembers using x intervals
continuum_line = np.interp(feature[:,0], end_memb[:,0], end_memb[:,1])
#continuum removal
continuum_removed = continuum_line/feature[:,1]
#stack into coord pairs so we can measure the area of the feature
ft_coords = np.vstack((feature,
np.column_stack((feature[:,0],continuum_line))))
#get the area
area = self.area(ft_coords)
#get the area of the continuum removed feature
cont_rem_2d = np.column_stack((feature[:,0],continuum_removed))
cont_r_area = self.area(cont_rem_2d)
#band-normalised by area continuum removal
cont_BNA = (1-(feature[:,1]/continuum_line))/area
#continuum removed area on left of minima
cont_area_left = self.area(cont_rem_2d[0:minima,:])
#continuum removed area on right of minima
cont_area_right = self.area(cont_rem_2d[minima:,:])
return (continuum_line,
continuum_removed,
cont_BNA,
cont_r_area,
area,
cont_area_left,
cont_area_right)
#define area of 2d polygon- using shoelace formula
def area(self, coords2d):
#setup counter
total = 0.0
#get the number of coorsinate pairs
N = coords2d.shape[0]
#iterate through these
for i in range(N):
#define the first coordinate pair
vertex1 = coords2d[i]
#do the second
vertex2 = coords2d[(i+1) % N]
#add this edge's shoelace cross-product term to the total
total += vertex1[0]*vertex2[1] - vertex1[1]*vertex2[0]
#return area
return abs(total/2)
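# Example: the shoelace formula applied to the unit square
# [[0, 0], [1, 0], [1, 1], [0, 1]] gives an area of 1.0.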
class Indices():
#class that does vegetation indices
def __init__(self,spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
'''The init method here records the range of the sensor; the methods below
run the appropriate indices within that range and return them, with a
header, for saving. The indices are all defined as methods of this class'''
def visnir(self):
# Sensor range VIS-NIR
if self.range[0] >= 350 and \
self.range[0] <= 500 and \
self.range[1] >= 900:
vis_nir = np.column_stack((self.sr700_800(),
self.ndvi694_760(),
self.ndvi695_805(),
self.ndvi700_800(),
self.ndvi705_750(),
self.rdvi(),
self.savi(),
self.msavi2(),
self.msr(),
self.msrvi(),
self.mdvi(),
self.tvi(),
self.mtvi(),
self.mtvi2(),
self.vog1vi(),
self.vog2(),
self.prsi(),
self.privi(),
self.sipi(),
self.mcari(),
self.mcari1(),
self.mcari2(),
self.npci(),
self.npqi(),
self.cri1(),
self.cri2(),
self.ari1(),
self.ari2(),
self.wbi()))
vis_nir_hdr=str('"sr700_800",'+
'"ndvi694_760",'+
'"ndvi695_805",'+
'"ndvi700_800",'+
'"ndvi705_750",'+
'"rdvi",'+
'"savi",'+
'"msavi2",'+
'"msr",'+
'"msrvi",'+
'"mdvi",'+
'"tvi",'+
'"mtvi",'+
'"mtvi2",'+
'"vog1vi",'+
'"vog2",'+
'"prsi"'+
'"privi",'+
'"sipi",'+
'"mcari",'+
'"mcari1",'+
'"mcari2",'+
'"npci",'+
'"npqi",'+
'"cri1",'+
'"cri2",'+
'"ari1",'+
'"ari2",'+
'"wbi"')
else:
vis_nir = None
vis_nir_hdr = None
return vis_nir,vis_nir_hdr
#Range NIR-SWIR
def nir_swir(self):
if self.range[0] <= 900 and self.range[1] >=2000:
nir_swir = np.column_stack((self.ndwi(),
self.msi(),
self.ndii()))
nir_swir_hdr = str('"ndwi",'+
'"msi",'+
'"ndii"')
else:
#continue
print ('not nir-swir')
nir_swir=None
nir_swir_hdr=None
return nir_swir, nir_swir_hdr
#range SWIR
def swir(self):
if self.range[1] >=2000:
swir = np.column_stack((self.ndni(),
self.ndli()))
swir_hdr=str('"ndni",'+
'"ndli"')
else:
print ('swir-nir')
swir = None
swir_hdr = None
#continue
return swir,swir_hdr
#||||||||||||||||||||| Methods |||||||||||||||||||||||||||||||||||||||||||||||
# function to run every permutation of the NDVI type index across the Red / IR
# ...... VIS / NIR methods ....
def multi_tbvi (self, red_start=650, red_end=750, ir_start=700, ir_end=850):
# get the indicies of the regions we're going to use.
# we've added default values here, but they can happily be overridden
#start of red
red_l =np.argmin(np.abs(self.wl-red_start))
#end of red
red_r = np.argmin(np.abs(self.wl-red_end))
#start of ir
ir_l = np.argmin(np.abs(self.wl-ir_start))
#end of ir
ir_r = np.argmin(np.abs(self.wl-ir_end))
#slice
left = self.values[red_l:red_r]
right = self.values[ir_l:ir_r]
#set up output
values = np.empty(3)
#set up counter
l = 0
#loop throught the values in the red
for lvalue in left:
l_wl = self.wl[l+red_l]
r = 0
l = l+1
#then calculate the index with each wl in the NIR
for rvalue in right:
value = (rvalue-lvalue)/(rvalue+lvalue)
r_wl = self.wl[r+ir_l]
out = np.column_stack((l_wl,r_wl,value))
values = np.vstack((values, out))
out = None
r = r+1
return values[1:,:]
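# Example (hypothetical wavelength windows): compute every two-band
# combination over narrower red/NIR ranges than the defaults:
#   >>> idx = Indices(spec_smoothed)
#   >>> tbvi = idx.multi_tbvi(red_start=660, red_end=680, ir_start=750, ir_end=770)
#   >>> tbvi.shape[1]    # columns: red wl, NIR wl, index value
#   3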
def sr700_800 (self, x=700, y=800):
index = self.values[np.argmin(np.abs(self.wl-x))]/self.values[np.argmin(np.abs(self.wl-y))]
return index
def ndvi705_750 (self, x=705, y=750):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi700_800 (self, x=700, y=800):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi694_760 (self, x=694, y=760):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi695_805 (self, x=695, y=805):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npci (self, x=430, y=680):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npqi (self, x=415, y=435):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
#mSRvi
#= (750-445)/(705+445)
def msrvi (self):
x = 750
y = 445
z = 705
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
msrvi_val = (x_val-y_val)/(z_val+y_val)
return msrvi_val
#Vogelmann Red Edge 1
#740/720
def vog1vi (self):
x = 740
y = 720
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
vog1vi_val = (x_val/y_val)
return vog1vi_val
#Vogelmann Red Edge 2
#= (734-747)/(715+726)
def vog2 (self):
v = 734
x = 747
y = 715
z = 726
v_val = self.values[np.argmin(np.abs(self.wl-v))]
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
vog2_val = (v_val-x_val)/(y_val+z_val)
return vog2_val
#PRI
# (531-570)/(531+570)
def privi (self):
x = 531
y = 570
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
privi_val = (x_val-y_val)/(x_val+y_val)
return privi_val
#SIPI
#(800-445)/(800-680)
def sipi (self):
x = 800
y = 445
z = 680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
sipi_val = (x_val-y_val)/(x_val+z_val)
return sipi_val
#Water band index
# WBI = 900/700
def wbi (self):
x = 900
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
wbi_val = (x_val/y_val)
return wbi_val
#mNDVI
#= (750-705)/((750+705)-(445))
def mdvi (self):
x = 750
y = 705
z = 445
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mdvi_val = (x_val-y_val)/((x_val+y_val)-z_val)
return mdvi_val
#Carotenid Reflectance Index
#CRI1 = (1/510)-(1/550)
def cri1 (self):
x = 510
y = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri1_val = (1/x_val)-(1/y_val)
return cri1_val
#CRI2 = (1/510)-(1/700)
def cri2 (self):
x = 510
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri2_val = (1/x_val)-(1/y_val)
return cri2_val
#Anthocyanin
#ARI1 = (1/550)-(1/700)
def ari1 (self):
x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari1_val = (1/x_val)-(1/y_val)
return ari1_val
#ARI2 = 800*((1/550)-(1/700)_))
def ari2 (self):
x = 510
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari2_val = 800*((1/x_val)-(1/y_val))
return ari2_val
#MSR
#=((800/670)-1)/SQRT(800+670)
def msr (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msr_val = ((x_val/y_val)-1)/(np.sqrt(x_val+y_val))
return msr_val
#SAVI
#= (1+l)(800-670)/(800+670+l)
def savi (self, l=0.5):
x = 800
y = 670
l = 0.5
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
savi_val = ((1+l)*(x_val-y_val))/(x_val+y_val+l)
return savi_val
#MSAVI
#=1/2(sqrt(2*800)+1)-SQRT(((2*800+1)sqr)-8*(800-670)
def msavi2 (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msavi2_top1 = (2*x_val+1)
msavi2_top2 = (np.sqrt(np.square(2*x_val+1)-(8*(x_val-y_val))))
msavi2_top = msavi2_top1-msavi2_top2
msavi2_val = msavi2_top/2
return msavi2_val
#Modified clhoropyll absorption indec
#MCARI = ((700-670)-0.2*(700-550))*(700/670)
def mcari (self):
x = 700
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mcari_val = ((x_val-y_val)-(0.2*(x_val-z_val)))*(x_val/y_val)
return mcari_val
#Triangular vegetation index
#TVI 0.5*(120*(750-550))-(200*(670-550))
def tvi (self):
x = 750
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
tvi_val = 0.5*((120*(x_val-y_val))-(200*(z_val-y_val)))
return tvi_val
#MCARI1 = 1.2*((2.5*(800-670))-(1.3*(800-550))), after Haboudane et al 2004
def mcari1 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mcari1_val = 1.2*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
return mcari1_val
#MTVI1
#=1.2*((1.2*(800-550))-(2.5(670-550)))
def mtvi (self):
x = 800
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mtvi_val = 1.2*((1.2*(x_val-y_val))-(2.5*(z_val-y_val)))
return mtvi_val
def mcari2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mcari2_top = 1.5*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
mcari2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mcari2_val = mcari2_top/mcari2_btm
return mcari2_val
#MTVI2 = 1.5*((1.2*(800-550))-(2.5*(670-550)))/sqrt(((2*800+1)sq)-((6*800)-(5*sqrt(670)))-0.5), after Haboudane et al 2004
def mtvi2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mtvi2_top = 1.5*((1.2*(x_val-z_val))-(2.5*(y_val-z_val)))
mtvi2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mtvi2_val = mtvi2_top/mtvi2_btm
return mtvi2_val
#Renormalised DVI
#RDVI = (800-670)/sqrt(800+670)
def rdvi (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
rdvi_val = (x_val-y_val)/np.sqrt(x_val+y_val)
return rdvi_val
#Plant senescance reflectance index
#PRSI = (680-500)/750
def prsi (self):
x = 680
y = 500
z = 750
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
prsi_val = (x_val-y_val)/z_val
return prsi_val
#||||||||||||||||||||||| SWIR methods ||||||||||||||||||||||||||||||||||||
#Cellulose Absorption Index
#CAI = 0.5*(2000+2200)-2100
def cai (self):
x = 2000
y = 2200
z = 2100
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
cai_val = 0.5*(x_val+y_val)-z_val
return cai_val
#Normalized Lignin Difference
#NDLI = (log(1/1754)-log(1/1680))/(log(1/1754)+log(1/1680))
def ndli (self):
x = 1754
y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndli_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndli_val
#Canopy N
#NDNI =(log(1/1510)-log(1/1680))/(log(1/1510)+log(1/1680))
def ndni (self):
x = 1510
y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndni_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndni_val
#|||||||||||||||||||||| Full spectrum (VIS-SWIR)||||||||||||||||||||||||||||
#Normalised Difference IR index
#NDII = (819-1649)/(819+1649)#NDII = (819-1649)/(819+1649)
def ndii (self):
x = 819
y = 1649
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndii_val = (x_val-y_val)/(x_val+y_val)
return ndii_val
#Moisture Stress Index
#MSI = 1599/819
def msi (self):
x = 1599
y = 819
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msi_val = (x_val/y_val)
return msi_val
#NDWI
#(857-1241)/(857+1241)
def ndwi (self):
x = 857
y = 1241
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndwi_val = (x_val-y_val)/(x_val+y_val)
return ndwi_val
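# Example (hypothetical, assuming spec_smoothed is a 2-column wl/reflectance
# array spanning 350-2500 nm):
#   >>> veg_indices = Indices(spec_smoothed)
#   >>> vis_nir, vis_nir_hdr = veg_indices.visnir()
#   >>> swir, swir_hdr = veg_indices.swir()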
class red_edge():
'''Class to derive red edge position using a number of different methods'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
'''Again, the method that initialises this class uses the range of the
sensor to check to see if it falls within the red-edge region. If so,
it will derive the red edge using the different methods and save these
as separate hdf5 datasets in the appropriate group'''
if self.range[0] <= 670 and self.range[1] >=750:
self.redge_vals = np.column_stack((self.redge_linear(),
self.redge_lagrange(),
self.redge_linear_extrapolation()))
print (self.redge_vals)
self.redge_hdr = str('"linear",'+
'"lagrange",'+
'"extrapolated"')
else:
print ('red_edge out of range')
self.redge_vals = None
self.redge_hdr = None
##################### METHODS #########################################
#linear- defined by clevers et al 1994:
def redge_linear(self):
r670 = self.values[np.argmin(np.abs(self.wl-670))]
r780 = self.values[np.argmin(np.abs(self.wl-780))]
r700 = self.values[np.argmin(np.abs(self.wl-700))]
r740 = self.values[np.argmin(np.abs(self.wl-740))]
r_edge = (r670+r780)/2
lin_rep =700+40*((r_edge-r700)/(r740-r700))
print ('REDGE_LINEAR',lin_rep)
return lin_rep
#Lagrangian method, after Dawson & Curran 1998
def redge_lagrange(self):
#select the red edge region of the first derviative and associate this
#with wavelength
x = 680
y = 730
first_diff = np.diff(self.values, 1)
spec_in = np.column_stack((self.wl[1:], first_diff))
l680 = np.argmin(np.abs(spec_in[:,0]-x))
r680 = spec_in[l680,0]
l730 = np.argmin(np.abs(spec_in[:,0]-y))
r730 = spec_in[l730,0]
redge_region_sel = np.where(np.logical_and(spec_in[:,0]>r680-1,
spec_in[:,0]<r730+1))
redge_region = spec_in[redge_region_sel]
#find the maximum first derivative, return index
dif_max = np.argmax(redge_region[:,1], axis=0)
#find band with the max derivative -1, return index
dif_max_less = (np.argmax(redge_region[:,1], axis=0))-1
#find band with the max derivative +1, return index
dif_max_more = (np.argmax(redge_region[:,1], axis=0))+1
if dif_max_more >= redge_region.shape[0]:
dif_max_more = redge_region.shape[0]-1
#use these indeces to slice the array
rmax = redge_region[dif_max]
rmax_less =redge_region[dif_max_less]
rmax_more =redge_region[dif_max_more]
#lagrangian interpolation with three points
#this has been expanded to make the syntax easier
a = rmax_less[1]/(rmax_less[0]-rmax[0])*(rmax_less[0]-rmax_more[0])
b = rmax[1]/(rmax[0]-rmax_less[0])*(rmax[0]-rmax_more[0])
c = rmax_more[1]/(rmax_more[0]-rmax_less[0])*(rmax_more[0]-rmax[0])
d = a*(rmax[0]+rmax_more[0])
e = b*(rmax_less[0]+rmax_more[0])
f = c*(rmax_less[0]+rmax[0])
lg_rep = (d+e+f)/(2*(a+b+c))
print ('Lagrangian', lg_rep)
return lg_rep
#Linear extrapolation- after Cho & Skidmore 2006, Cho et al 2007
def redge_linear_extrapolation(self):
diff = np.diff(self.values)
d680 = diff[np.argmin(np.abs(self.wl-680+1))]
d694 = diff[np.argmin(np.abs(self.wl-694+1))]
d724 = diff[np.argmin(np.abs(self.wl-724+1))]
d760 = diff[np.argmin(np.abs(self.wl-760+1))]
red_slope = ((d694-d680)/(694-680))
ir_slope = ((d760-d724)/(760-724))
red_inter = d680-(red_slope*680)
ir_inter = d724-(ir_slope*724)
wl = (ir_inter-red_inter)/(ir_slope-red_slope)
print ('^!!!!!!!!! Linear:',wl)
return np.abs(wl)
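# Example (hypothetical, assuming the spectrum covers 670-750 nm):
#   >>> redge = red_edge(spec_smoothed)
#   >>> redge.redge_vals    # columns: linear, lagrange, extrapolated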
class fluorescence():
'''this class is intended to look for evidence of photosynthetic fluorescence;
currently this is limited to simple reflectance indices. This should be
expanded to take in other more complex methods to investigate fluorescence'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
print ('call to fluor')
'''The init method checks the range to establish if it overlaps with the
region of chlorophyll fluorescence. If so it will perform the
analysis methods and output to hdf5'''
def wl_selector(self, x):
'''this method returns the reflectance value at the wavelength closest
to that specified'''
value = self.values[np.argmin(np.abs(self.wl-x))]
return value
def d_wl_selector(self, x):
'''this method returns the first-derivative value at the wavelength
closest to that specified'''
diff = np.diff(self.values)
value = diff[np.argmin(np.abs(self.wl-x))+1]
return value
def wl_max_d(self):
'''method to extract wavelength of the maxima of the first derivative
and return this'''
start = np.argmin(np.abs(self.wl-650))
end = np.argmin(np.abs(self.wl-760))
diff = np.diff(self.values[start:end])
maxdiff = np.argmax(diff)
maxdiffwl = self.wl[maxdiff+start+1]
return maxdiffwl, diff[maxdiff]
def simple_ratios(self):
''' This method runs flourescence indices ratios and returns them as a
stacked numpy array'''
#r680/r630
r680r630 = self.wl_selector(680)/self.wl_selector(630)
print (r680r630)
#r685/r630
r685r630 = self.wl_selector(685)/self.wl_selector(630)
print (r685r630)
#r685/r655
r685r655 = self.wl_selector(685)/self.wl_selector(655)
print (r685r655)
#r687/r630
r687r630 = self.wl_selector(687)/self.wl_selector(630)
print (r687r630)
#r690/r630
r690r630 = self.wl_selector(690)/self.wl_selector(630)
print (r690r630)
#r750/r800
r750r800 = self.wl_selector(750)/self.wl_selector(800)
print (r750r800)
#sq(r685)/(r675-r690)
sqr685 = np.square(self.wl_selector(685))/(self.wl_selector(675)-self.wl_selector(690))
print (sqr685)
#(r675-r690)/sq(r683) Zarco-Tejada 2000
r675r690divsq683 = (self.wl_selector(675)-self.wl_selector(690))/np.square(self.wl_selector(683))
print (r675r690divsq683)
#d705/d722
d705d722 = self.d_wl_selector(705)/self.d_wl_selector(722)
print (d705d722)
#d730/d706
d730d706 = self.d_wl_selector(730)/self.d_wl_selector(706)
print (d730d706)
#(d688-d710)/sq(d697)
d686d710sq697 = (self.d_wl_selector(688)-self.d_wl_selector(710))\
/np.square(self.d_wl_selector(697))
print (d686d710sq697)
#wl at max d / d720
maxdd720 = self.wl_max_d()[1]/self.d_wl_selector(720)
print (maxdd720)
#wl at max d / d703
maxdd703 = self.wl_max_d()[1]/self.d_wl_selector(703)
print (maxdd703)
#wl at max d / d(max d+12)
print (self.wl_max_d()[0])
maxd12 = self.wl_max_d()[1]/self.d_wl_selector(self.wl_max_d()[0]+12)
print (maxd12)
combined = np.vstack((r680r630,
r685r630,
r685r655,
r687r630,
r690r630,
r750r800,
sqr685,
r675r690divsq683,
d705d722,
d730d706,
d686d710sq697,
maxdd720,
maxdd703,
maxd12))
fluo_hdr = str('"r680r630",'+
'"r685r630",'+
'"r685r655",'+
'"r687r630",'+
'"r690r630",'+
'"r750r800",'+
'"sqr685",'+
'"r675r690divsq683",'+
'"d705d722",'+
'"d730d706",'+
'"d686d710sq697",'+
'"maxdd720",'+
'"maxdd703",'+
'"maxd12"')
return combined, fluo_hdr
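# Example (hypothetical): the ratios come back as a 14-row column vector
# matched to the comma-separated header string:
#   >>> fluo = fluorescence(spec_smoothed)
#   >>> ratios, hdr = fluo.simple_ratios()
#   >>> ratios.shape[0]
#   14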
def dual_peak(self):
'''This function looks for a dual peak in the red-edge region. If it's
there it measures the depth of the feature between the two peaks.
UNTESTED'''
start = np.argmin(np.abs(self.wl-640))
end = np.argmin(np.abs(self.wl-740))
d1_region = np.diff(self.values[start:end])
#d2_region = np.diff(self.values[start:end], n=2)
peak_finder = find_peaks_cwt(d1_region, np.arange(3,10))
peak_wl = self.wl[start:end][peak_finder]
fluor_peaks = []
for peak, wl in zip(peak_finder, peak_wl):
if wl == self.wl[np.argmin(np.abs(self.wl-668))]:
print ('found fluorescence peak at 668nm')
fluor_peaks.append(peak)
elif wl == self.wl[np.argmin(np.abs(self.wl-735))]:
print ('found fluorescence peak at 735nm')
fluor_peaks.append(peak)
else:
print ('unknown peak')
#TODO: if len(fluor_peaks) == 2, measure the depth between the two peaks
class load_asd():
def __init__(self, indir, output_dir):
data_list = os.listdir(indir)
print (data_list)
#output_dir = os.path.join(indir,'output')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for directory in data_list:
parent = os.path.join(indir, directory)
spectra_dir = os.path.join(parent, 'raw_spectra')
reading_info_dir = os.path.join(parent, 'reading_info')
sensor_name = 'ASD FieldSpec Pro'
sensor_type = 'SPR'
sensor_units = 'nm'
sensor_range = [350,2500]
os.chdir(reading_info_dir)
reading_info_file = open('reading_atributes.txt','r')
reading_info = csv.DictReader(reading_info_file)
reading_info_array = np.empty(12)
readings_list = [row for row in reading_info]
for reading in readings_list[:]:
reading_filename = str(reading['reading_id']+'.txt')
reading_info_line = np.column_stack((reading['reading_id'],
reading['dartField'],
reading['transect'],
reading['transectPosition'],
reading['reading_type'],
reading['reading_coord_osgb_x'],
reading['reading_coord_osgb_y'],
reading['dateOfAcquisition'],
reading['timeOfAcquisition'],
reading['instrument_number'],
reading['dark_current'],
reading['white_ref']))
#print reading_info_line
if reading['reading_type']== 'REF':
reading_info_array = np.vstack((reading_info_array,reading_info_line))
#print reading_info_array
print ('*********** Loading File', reading_filename, '***********')
os.chdir(spectra_dir)
spec = np.genfromtxt(reading_filename,
delimiter=', ',
skip_header=30)
spec = np.column_stack((spec[:,0],spec[:,1]*100))
nir_start = 0
nir_end = 990
nir_weight = 3.5
nir_k = 4.9
nir_s =45
swir1_start = 1080
swir1_end = 1438
swir1_weight = 8.5
swir1_k = 3.5
swir1_s = 35
swir2_start = 1622
swir2_end = 2149
swir2_weight = 1.2
swir2_s = 92
swir2_k = 2.8
#smoothing(perc_out, block_start, block_end, kparam, weight, sparam)
nir_smoothed = smoothing(spec, nir_start, nir_end, nir_k, nir_weight, nir_s)
swir1_smoothed = smoothing(spec, swir1_start, swir1_end, swir1_k, swir1_weight, swir1_s)
swir2_smoothed = smoothing(spec, swir2_start, swir2_end, swir2_k, swir2_weight, swir2_s)
print ('Smoothed array shape', nir_smoothed.shape,swir1_smoothed.shape,swir2_smoothed.shape)
nir_swir_gap = interpolate_gaps(nir_smoothed,swir1_smoothed)
swir2_gap = interpolate_gaps(swir1_smoothed,swir2_smoothed)
spec_smoothed = np.vstack((nir_smoothed,
nir_swir_gap,
swir1_smoothed,
swir2_gap,
swir2_smoothed))
print ('Spec SHAPE:', spec.shape)
survey_dir = os.path.join(output_dir, directory)
if not os.path.exists(survey_dir):
os.mkdir(survey_dir)
os.chdir(survey_dir)
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
print (abs470.abs_feature()[0])
abs470_ftdef = abs470.abs_feature()[0]
print (abs470_ftdef)
abs470_crem = abs470.abs_feature()[2]
if abs470_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs470_ftdef.txt',
abs470_ftdef,
header=abs470.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs470_crem.txt',
abs470_crem,
delimiter=',')
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
if abs670_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs670_ftdef.txt',
abs670_ftdef,
header=abs670.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs670_crem.txt',
abs670_crem,
delimiter=',')
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1115,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
if abs970_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs970_ftdef.txt',
abs970_ftdef,
header=abs970.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs970_crem.txt',
abs970_crem,
delimiter=',')
except Exception:
pass
try:
abs1200 = absorption_feature(spec_smoothed,1080,1300,1190)
abs1200_ftdef = abs1200.abs_feature()[0]
abs1200_crem = abs1200.abs_feature()[2]
if abs1200_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs1200_ftdef.txt',
abs1200_ftdef,
header=abs1200.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1200_crem.txt',
abs1200_crem,
delimiter=',')
except Exception:
pass
try:
abs1730 = absorption_feature(spec_smoothed,1630,1790,1708)
abs1730_ftdef = abs1730.abs_feature()[0]
abs1730_crem = abs1730.abs_feature()[2]
if abs1730_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs1730_ftdef.txt',
abs1730_ftdef,
header=abs1730.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1730_crem.txt',
abs1730_crem,
delimiter=',')
except Exception:
pass
print (spec_smoothed.shape)
try:
abs2100 = absorption_feature(spec_smoothed,2001,2196,2188)
abs2100_ftdef = abs2100.abs_feature()[0]
abs2100_crem = abs2100.abs_feature()[2]
if abs2100_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs2100_ftdef.txt',
abs2100_ftdef,
header=abs2100.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs2100_crem.txt',
abs2100_crem,
delimiter=',')
except Exception:
pass
veg_indices = Indices(spec_smoothed)
indices = np.column_stack((veg_indices.visnir()[0],
veg_indices.nir_swir()[0],
veg_indices.swir()[0]))
print (veg_indices.visnir()[1],veg_indices.nir_swir()[1],veg_indices.swir()[1])
hdr = str(veg_indices.visnir()[1]+','+veg_indices.nir_swir()[1]+','+veg_indices.swir()[1])
np.savetxt(reading_filename[0:-4]+'_indices.txt',
indices,
header=hdr,
delimiter=',')
mtbvi = veg_indices.multi_tbvi()
np.savetxt(reading_filename[0:-4]+'_mtbvi.txt',
mtbvi,
delimiter=',')
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
print (redge.redge_vals)
np.savetxt(reading_filename[0:-4]+'_redge.txt',
redge.redge_vals,
delimiter=',')
fluo = fluorescence(spec_smoothed)
np.savetxt(reading_filename[0:-4]+'_fluo.txt',
np.transpose(fluo.simple_ratios()[0]),
header = fluo.simple_ratios()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_spec.txt',
spec_smoothed,
delimiter=',')
class load_image():
def __init__(self, wavelengths_dir, image_dir, out_dir):
os.chdir(wavelengths_dir)
wavelengths = np.genfromtxt('wavelengths.txt')
print ('wavelengths array', wavelengths)
os.chdir(image_dir)
image_list = os.listdir(image_dir)
for image in image_list:
import_image = self.get_image(image)
image_name = image[:-4]
print ('IMAGE NAME:', image_name)
row = 1
img_array = import_image[0]
print ('Image_array', img_array)
projection = import_image[1]
print ('Projection',projection)
x_size = import_image[2]
print ('Xdim',x_size)
y_size = import_image[3]
print ('Ydim', y_size)
spatial = import_image[4]
print (spatial)
x_top_left = spatial[0]
ew_pix_size = spatial[1]
rotation_ew = spatial[2]
y_top_left = spatial[3]
rotation_y = spatial[4]
ns_pixel_size = spatial[5]
print ('Spatial', x_top_left,ew_pix_size,rotation_ew,y_top_left,rotation_y,ns_pixel_size)
print ('IMAGE ARRAY SHAPE',img_array.shape)
img_dims = img_array.shape
print (img_dims[0],'/',img_dims[1])
#indices+29
indices_out = np.zeros((img_dims[0],img_dims[1],29), dtype=np.float32)
#print indices_out
#redge=3
redge_out = np.zeros((img_dims[0],img_dims[1]),dtype=np.float32)
#fluo=14
fluo_out=np.zeros((img_dims[0],img_dims[1],14), dtype=np.float32)
print ('fluo out', fluo_out.shape)
ft470_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft670_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft970_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
x470 = np.argmin(np.abs(wavelengths-400))
y470 = np.argmin(np.abs(wavelengths-518))
len470 = y470-x470
cr470_out = np.zeros((img_dims[0],img_dims[1],len470), dtype=np.float32)
x670 = np.argmin(np.abs(wavelengths-548))
y670 = np.argmin(np.abs(wavelengths-800))
len670 = y670-x670
cr670_out = np.zeros((img_dims[0],img_dims[1],len670), dtype=np.float32)
print (cr670_out)
x970 = np.argmin(np.abs(wavelengths-880))
y970 = np.argmin(np.abs(wavelengths-1000))
len970 = y970-x970
cr970_out = np.zeros((img_dims[0],img_dims[1],len970), dtype=np.float32)
#print cr970_out
print (wavelengths)
row = 0
print ('***', row, img_dims[0])
for i in range(0,img_dims[0]):
print (i)
column = 0
#print 'COL',column
for j in range(0,img_dims[1]):
print ('COLUMN',column)
#print 'Pixel',pixel
name = '%s_pix-%s_%s' % (image_name,row,column)
print ('NAME',name)
pixel = img_array[row,column,:]
#smoothed = savgol_filter(pixel,5,2)
#spec_smoothed = np.column_stack((wavelengths,smoothed))
spec_smoothed = np.column_stack((wavelengths,pixel))
print (spec_smoothed)
veg_indices = Indices(spec_smoothed)
indices = veg_indices.visnir()[0]
print ('(*&)(*)(*&&^)^)^)*&^)*^)*&', indices)
indices_out[row,column,:]=indices
fluo = fluorescence(spec_smoothed)
fluo_out[row,column,:]=np.transpose(fluo.simple_ratios()[0])
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
redge_out[row,column]= redge.redge_vals[0,2]
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
abs470_ftdef = abs470.abs_feature()[0]
abs470_crem = abs470.abs_feature()[2]
abs470_crem = np.column_stack((abs470_crem[:,0],abs470_crem[:,4]))
print ('!*!*!*!*!&!*!*', abs470_crem)
crem470_fill = self.crem_fill(x470,y470,abs470_crem,wavelengths)
ft470_out[row,column,:]=abs470_ftdef
cr470_out[row,column,:]=crem470_fill
except Exception:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
abs670_crem = np.column_stack((abs670_crem[:,0],abs670_crem[:,4]))
ft670_out[row,column,:]=abs670_ftdef
crem670_fill = self.crem_fill(x670,y670,abs670_crem,wavelengths)
cr670_out[row,column,:]=crem670_fill
except Exception:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1000,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
abs970_crem = np.column_stack((abs970_crem[:,0],abs970_crem[:,4]))
crem970_fill = self.crem_fill(x970,y970,abs970_crem,wavelengths)
ft970_out[row,column,:]=abs970_ftdef
cr970_out[row,column,:]=crem970_fill
except Exception:
pass
column = column+1
print (pixel.shape)
row = row+1
self.writeimage(out_dir,image+'_indices.tif',indices_out,spatial)
self.writeimage(out_dir,image+'_fluo.tif',fluo_out,spatial)
self.writeimage(out_dir,image+'_redge.tif',redge_out,spatial)
self.writeimage(out_dir,image+'_ft470.tif',ft470_out,spatial)
self.writeimage(out_dir,image+'_cr470.tif',cr470_out,spatial)
self.writeimage(out_dir,image+'_ft670.tif',ft670_out,spatial)
self.writeimage(out_dir,image+'_cr670.tif',cr670_out,spatial)
self.writeimage(out_dir,image+'_ft970.tif',ft970_out,spatial)
self.writeimage(out_dir,image+'_cr970.tif',cr970_out,spatial)
def crem_fill(self,xwl,ywl,bna,wavelengths):
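# Pad the continuum-removed values (bna: [wavelength, value] pairs) into a
# zero-filled vector spanning the xwl:ywl wavelength window, aligned at the
# wavelength where the absorption feature actually starts.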
bna_out=np.zeros((ywl-xwl))
bna_wvl = bna[:,0]
bna_refl= bna[:,1]
full_wl = wavelengths[xwl:ywl]
index = np.argmin(np.abs(full_wl-bna_wvl[0])) # index relative to the xwl:ywl window, not the full array
bna_out[index:]=bna_refl
return bna_out
def get_image(self, image):
print ('call to get_image')
# open the dataset
dataset = gdal.Open(image, GA_ReadOnly)
print ('Dataset',dataset)
# if there's nothing there, print an error
if dataset is None:
print ('BORK: Could not load file: %s' %(image))
# otherwise do stuff
else:
#get the format
driver = dataset.GetDriver().ShortName
#get the x dimension
xsize = dataset.RasterXSize
#get the y dimension
ysize = dataset.RasterYSize
#get the projection
proj = dataset.GetProjection()
#get the number of bands
bands = dataset.RasterCount
#get the geotransform. Returns a tuple in standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
spatial = dataset.GetGeoTransform()
#print some stuff to console to show we're paying attention
print ('Found raster in %s format. Raster has %s bands' %(driver,bands))
print ('Projected as %s' %(proj))
print ('Dimensions: %s x %s' %(xsize,ysize))
#instantiate a counter
count = 1
#OK. This is the bit that actually loads the bands in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
#show that your computer's fans are whining for a reason
print ('Loading band: %s of %s' %(count,bands))
#get the band
band = dataset.GetRasterBand(count)
# load this as a numpy array
data_array = band.ReadAsArray()
'''data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)'''
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
stacked = data_array
#else combine these
else:
stacked = np.dstack((stacked,data_array))
#stacked = stacked.filled(-999)
#just to check it's working
#print stacked.shape
# increment the counter
count = count+1
#stacked = stacked.astype(np.float32, copy=False)
return stacked,proj,xsize,ysize,spatial
def writeimage(self,
outpath,
outname,
image,
spatial):
data_out = image
print ('ROWS,COLS',image.shape)
print ('Call to write image')
os.chdir(outpath)
print ('OUTPATH',outpath)
print ('OUTNAME',outname)
#load the driver for the format of choice
driver = gdal.GetDriverByName("Gtiff")
#create an empty output file
#get the number of bands we'll need
try:
bands = image.shape[2]
except IndexError: # a single-band image is 2D, so shape[2] does not exist
bands=1
print ('BANDS OUT', bands)
#file name, x columns, y columns, bands, dtype
out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32)
#define the location using coords of top-left corner
# minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
out.SetGeoTransform(spatial)
srs = osr.SpatialReference()
#get the coordinate system using the EPSG code
srs.ImportFromEPSG(27700)
#set projection of output file
out.SetProjection(srs.ExportToWkt())
band = 1
if bands == 1:
out.GetRasterBand(band).WriteArray(data_out)
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
else:
while (band<=bands):
data = data_out[:,:,band-1]
#write values to empty array
out.GetRasterBand(band).WriteArray( data )
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
band = band+1
out = None
print ('Processing of %s complete' % (outname))
return outname
if __name__ == "__main__":
#dir_path = os.path.dirname(os.path.abspath('...'))
#data_root = os.path.join(dir_path, 'data')
data_root = '/home/dav/data/temp/test/test_spec'
for folder in os.listdir(data_root):
input_dir = os.path.join(data_root,folder)
print (input_dir)
surveys_list = os.listdir(input_dir)
print (surveys_list)
for survey_dir in surveys_list:
print (survey_dir)
site_dir=os.path.join(input_dir,survey_dir)
print (site_dir)
image_path = os.path.join(site_dir, 'image')
print (image_path)
wavelengths_dir = os.path.join(site_dir, 'wavelengths')
print (wavelengths_dir)
out_dir = os.path.join(site_dir,'output')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
load_image(wavelengths_dir,image_path,out_dir) | mit |
alvarofierroclavero/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
annahs/atmos_research | LEO_2D_histos_from_db.py | 1 | 3992 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120401','%Y%m%d')
end_date = datetime.strptime('20120531','%Y%m%d')
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2.lupckl'
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
lookup = open(lookup_file, 'rb') # pickled files must be opened in binary mode
lookup_table = pickle.load(lookup)
lookup.close()
min_rBC_mass = 1.63 # 120 nm (2.6 -> 140 nm, 3.86 -> 160 nm, 0.25)
max_rBC_mass = 2.6 # 140 nm (3.86 -> 160 nm, 5.5 -> 180 nm, 10.05)
VED_min = 65
VED_max = 220
scat_lim = 100
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
data = []
particles=0
no_scat=0
no_scat_110 =0
fit_failure=0
early_evap=0
early_evap_110=0
flat_fit=0
LF_high=0
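# QC counters (meanings inferred from the names above): no measurable scatter
# signal, failed LEO fits, early evaporation, flat fits, and unphysically
# high LEO amplitudes.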
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
if meas_scat_amp < 6:
no_scat +=1
if rBC_VED > scat_lim:
no_scat_110+=1
data.append([rBC_VED,coat_thickness])
if LEO_amp == 0.0 and LF_baseline_pctdiff == None and meas_scat_amp >= 6:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -2:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -1:
fit_failure +=1
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
if LEO_amp > LF_max:
LF_high +=1
if LEO_amp > 0:
data.append([rBC_VED,coat_thickness])
print('# of particles', particles)
print('no_scat', no_scat)
print('no_scat_110', no_scat_110)
print('fit_failure', fit_failure)
print('early_evap', early_evap)
print('early_evap_110', early_evap_110)
print('flat_fit', flat_fit)
print('LF_high', LF_high)
evap_pct = (early_evap)*100.0/particles
evap_pct_110 = (early_evap_110)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
no_scat_pct_110 = no_scat_110*100./particles
print(evap_pct, evap_pct_110, no_scat_pct, no_scat_pct_110)
rBC_VEDs = [row[0] for row in data]
coatings = [row[1] for row in data]
median_coat = np.median (coatings)
print('median coating', median_coat)
#####hexbin coat vs core###
fig = plt.figure()
ax = fig.add_subplot(111)
#x_limits = [0,250]
#y_limits = [0,250]
#h = plt.hexbin(rBC_VEDs, coatings, cmap=cm.jet,gridsize = 50, mincnt=1)
hist = plt.hist(coatings, bins=50)
plt.ylabel('frequency')
plt.xlabel('Coating Thickness (nm)')
#cb = plt.colorbar()
#cb.set_label('frequency')
plt.show()
| mit |
DTUWindEnergy/Python4WindEnergy | lesson 3/results/ebra.py | 1 | 8402 | # -*- coding: utf-8 -*- <nbformat>3.0</nbformat>
# <headingcell level=1>
# Plotting with Matplotlib
# <headingcell level=2>
# Prepare for action
# <codecell>
import numpy as np
import scipy as sp
import sympy
# Pylab combines the pyplot functionality (for plotting) with the numpy
# functionality (for mathematics and for working with arrays) in a single namespace
# aims to provide a closer MATLAB feel (the easy way). Note that his approach
# should only be used when doing some interactive quick and dirty data inspection.
# DO NOT USE THIS FOR SCRIPTS
#from pylab import *
# the convenient Matplotlib plotting interface pyplot (the tidy/right way)
# use this for building scripts. The examples here will all use pyplot.
import matplotlib.pyplot as plt
# for using the matplotlib API directly (the hard and verbose way)
# use this when building applications, and/or backends
import matplotlib as mpl
# <markdowncell>
# How would you like the IPython notebook to show your plots? In order to use the
# matplotlib IPython magic your IPython notebook should be launched as
#
# ipython notebook --matplotlib=inline
#
# Make plots appear as a pop up window, choose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx'
#
# %matplotlib qt
#
# or inline in the notebook (no panning or zooming through the plot). Not working in IPython 0.x
#
# %matplotlib inline
#
# <codecell>
# activate pop up plots
#%matplotlib qt
# or change to inline plots
# %matplotlib inline
# <headingcell level=3>
# Matplotlib documentation
# <markdowncell>
# Finding your own way (aka RTFM). Hint: there is search box available!
#
# * http://matplotlib.org/contents.html
#
# The Matplotlib API docs:
#
# * http://matplotlib.org/api/index.html
#
# Pyplot, object oriented plotting:
#
# * http://matplotlib.org/api/pyplot_api.html
# * http://matplotlib.org/api/pyplot_summary.html
#
# Extensive gallery with examples:
#
# * http://matplotlib.org/gallery.html
# <headingcell level=3>
# Tutorials for those who want to start playing
# <markdowncell>
# If reading manuals is too much for you, there is a very good tutorial available here:
#
# * http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb
#
# Note that this tutorial uses
#
# from pylab import *
#
# which is usually not advised in more advanced script environments. When using
#
# import matplotlib.pyplot as plt
#
# you need to preceed all plotting commands as used in the above tutorial with
#
# plt.
# <markdowncell>
# Give me more!
#
# [EuroScipy 2012 Matplotlib tutorial](http://www.loria.fr/~rougier/teaching/matplotlib/). Note that here the author uses ```from pylab import * ```. When using ```import matplotlib.pyplot as plt``` the plotting commands need to be preceded with ```plt.```
# <headingcell level=2>
# Plotting template starting point
# <codecell>
# some sample data
x = np.arange(-10,10,0.1)
# <markdowncell>
# To change the default plot configuration values.
# <codecell>
page_width_cm = 13
dpi = 200
inch = 2.54 # inch in cm
# setting global plot configuration using the RC configuration style
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=12) # tick labels
plt.rc('ytick', labelsize=20) # tick labels
plt.rc('axes', labelsize=20) # axes labels
# If you don’t need LaTeX, don’t use it. It is slower to plot, and text
# looks just fine without. If you need it, e.g. for symbols, then use it.
#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac
# <codecell>
# create a figure instance, note that figure size is given in inches!
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# set the big title (note aligment relative to figure)
fig.suptitle("suptitle 16, figure alignment", fontsize=16)
# actual plotting
ax.plot(x, x**2, label="label 12")
# set axes title (note aligment relative to axes)
ax.set_title("title 14, axes alignment", fontsize=14)
# axes labels
ax.set_xlabel('xlabel 12')
ax.set_ylabel(r'$y_{\alpha}$ 12', fontsize=8)
# legend
ax.legend(fontsize=12, loc="best")
# saving the figure in different formats
# fig.savefig('figure-%03i.png' % dpi, dpi=dpi)
# fig.savefig('figure.svg')
# fig.savefig('figure.eps')
# <codecell>
# following steps are only relevant when using figures as pop up windows (with %matplotlib qt)
# to update a figure with has been modified
fig.canvas.draw()
# show a figure
fig.show()
# <headingcell level=2>
# Exercise
# <markdowncell>
# The current section is about you trying to figure out how to do several plotting features. You should use the previously mentioned resources to find how to do that. In many cases, google is your friend!
# <markdowncell>
# * add a grid to the plot
# <codecell>
plt.plot(x,x**2)
plt.grid('on')
# <markdowncell>
# * change the location of the legend to different places
# <codecell>
plt.plot(x,x**2, label="label 12")
plt.legend(fontsize=12, loc="upper right")
# <markdowncell>
# * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
# <codecell>
stride = max( int(len(x) / 20), 1)
plt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label="label 12")
plt.legend(fontsize=12, loc="upper center")
# <markdowncell>
# * add different sub-plots
# <codecell>
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
# <markdowncell>
# * size the figure such that when included on an A4 page the fonts are given in their true size
# <codecell>
# matplotlib.rcParams.update({'font.size': 22})
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
fig.set_size_inches(8.2,3) # approximately A4 width (8.27 inches)
fig.set_dpi(100)
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
# ax[0].set('xtick', labelsize=12) # tick labels
# .rc('ytick', labelsize=20) # tick labels
# .rc('axes', labelsize=20) # axes labels
# fig.savefig('figure.pdf')
# <markdowncell>
# * make a contour plot
# <codecell>
X, Y = np.meshgrid(x,x)
plt.figure()
plt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1))
plt.show
# im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
# <markdowncell>
# * use twinx() to create a second axis on the right for the second plot
# <codecell>
plt.figure()
ax=plt.gca()
ax.plot(x,x**2)
ax2 = ax.twinx()
ax2.plot(x,x**4, 'r')
# <markdowncell>
# * add horizontal and vertical lines using axvline(), axhline()
# <codecell>
plt.figure()
plt.plot(x,x**2)
plt.axvline(2)
plt.axhline(10)
# <markdowncell>
# * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate()
# <codecell>
import datetime
dates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in range(24)])
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(dates, range(24))
fig.autofmt_xdate()
# <headingcell level=2>
# Advanced exercises
# <markdowncell>
# We are going to play a bit with regression
# <markdowncell>
# * Create a vector x of equally spaced number between $x \in [0, 5\pi]$ of 1000 points (keyword: linspace)
# <codecell>
n=1000
x=np.linspace(0,5*np.pi,n)
# <markdowncell>
# * create a vector y, so that y=sin(x) with some random noise
# <codecell>
y = np.sin(x) +np.random.rand(n)-0.5
yth = np.sin(x)
# <markdowncell>
# * plot it like this: ![test](files/plt1.png)
# <codecell>
fig=plt.figure()
ax=plt.gca()
ax.plot(x,y,'b.')
ax.plot(x,yth,'k--',label=r'$y=sin(x)$')
# <markdowncell>
# Try to do a polynomial fit on y(x) with different polynomial degree (Use numpy.polyfit to obtain coefficients)
#
# Plot it like this (use np.poly1d(coef)(x) to plot polynomials) ![test](files/plt2.png)
# <codecell>
for order in range(9):
coeff=np.polyfit(x,y,order)
ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order)
# shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# <codecell>
| apache-2.0 |
ilo10/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components needed to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
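# Sanity check of the bound quoted in the docstring: for n_samples = 500 and
# eps = 0.5, 4*log(500) / (0.5**2/2 - 0.5**3/3) ~= 299 dimensions, consistent
# with the n_components_range used in Part 2 below.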
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, density=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/comparing_explore_exploit_methods.py | 1 | 2913 | import numpy as np
import matplotlib.pyplot as plt
from comparing_epsilons import Bandit
from optimistic_initial_values import run_experiment as run_experiment_oiv
from ucb1 import run_experiment as run_experiment_ucb
class BayesianBandit:
def __init__(self, true_mean):
self.true_mean = true_mean
# parameters for mu - prior is N(0,1)
self.predicted_mean = 0
self.lambda_ = 1
self.sum_x = 0 # for convenience
self.tau = 1
def pull(self):
return np.random.randn() + self.true_mean
def sample(self):
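# Thompson sampling: draw a plausible mean from the posterior
# N(predicted_mean, 1/lambda_) instead of using a point estimate.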
return np.random.randn() / np.sqrt(self.lambda_) + self.predicted_mean
def update(self, x):
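# Conjugate Gaussian update with known precision tau and prior N(0, 1):
# the posterior precision lambda_ accumulates tau, and the posterior
# mean is tau * sum_x / lambda_.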
self.lambda_ += self.tau
self.sum_x += x
self.predicted_mean = self.tau*self.sum_x / self.lambda_
def run_experiment_decaying_epsilon(m1, m2, m3, N):
bandits = [Bandit(m1), Bandit(m2), Bandit(m3)]
data = np.empty(N)
for i in range(N):
# epsilon greedy
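# with a decaying schedule eps_t = 1/(t+1): explore often early on,
# almost never once the estimates have converged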
p = np.random.random()
if p < 1.0/(i+1):
j = np.random.choice(3)
else:
j = np.argmax([b.mean for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
for b in bandits:
print(b.mean)
return cumulative_average
def run_experiment(m1, m2, m3, N):
bandits = [BayesianBandit(m1), BayesianBandit(m2), BayesianBandit(m3)]
data = np.empty(N)
for i in range(N):
# optimistic initial values
j = np.argmax([b.sample() for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
return cumulative_average
if __name__ == '__main__':
m1 = 1.0
m2 = 2.0
m3 = 3.0
eps = run_experiment_decaying_epsilon(m1, m2, m3, 100000)
oiv = run_experiment_oiv(m1, m2, m3, 100000)
ucb = run_experiment_ucb(m1, m2, m3, 100000)
bayes = run_experiment(m1, m2, m3, 100000)
# log scale plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.xscale('log')
plt.show()
# linear plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.show()
| apache-2.0 |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py | 1 | 1812 | """
MOST OF THIS CODE IS NOT USED
IT'S COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,plotToo=True,color=None,label=None):
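# Estimate the value distribution of a baseline-subtracted sweep: histogram
# the points within +/- dev standard deviations of the mean, then smooth the
# histogram with a gaussian kernel to get a cleaner density estimate.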
Y=abf.sweepYsmartbase()[int(abf.pointsPerSec*.5):] # slice indices must be integers
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
R1,R2 = AV-dev*SD, AV+dev*SD # average +/- dev standard deviations
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
histSmooth=abf.convolve(hist,cm.kernel_gaussian(int(nBins/5)))
if plotToo:
plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10)
plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label)
return
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
# prepare figure
plt.figure(figsize=(10,10))
plt.grid()
plt.title("smart baseline value distribution")
plt.xlabel(abf.units2)
plt.ylabel("normalized density")
# do the analysis
abf.kernel=abf.kernel_gaussian(sizeMS=500)
abf.setsweep(175)
analyzeSweep(abf,color='b',label="baseline")
abf.setsweep(200)
analyzeSweep(abf,color='g',label="TGOT")
abf.setsweep(375)
analyzeSweep(abf,color='y',label="washout")
# show figure
plt.legend()
plt.margins(0,.1)
plt.show()
print("DONE")
| mit |
rsignell-usgs/PySeidon | pyseidon/tidegaugeClass/plotsTidegauge.py | 2 | 1096 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
import seaborn
class PlotsTidegauge:
"""'Plots' subset of Tidegauge class gathers plotting functions"""
def __init__(self, variable, debug=False):
self._var = variable
def plot_xy(self, x, y, title=' ', xLabel=' ', yLabel=' '):
"""
Simple X vs Y plot
Inputs:
------
- x = 1D array
- y = 1D array
"""
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
self._fig = plt.plot(x, y, label=title)
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
plt.ylabel(yLabel)
plt.xlabel(xLabel)
#plt.legend()
plt.show()
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/simple_plot.py | 1 | 1292 | """
===========
Simple Plot
===========
Create a simple plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
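# NodeBox cannot display matplotlib figures directly, so each plot is
# saved to a throwaway PNG and drawn back onto the canvas as an image.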
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
# Note that using plt.subplots below is equivalent to using
# fig = plt.figure and then ax = fig.add_subplot(111)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
pltshow(plt)
| mit |
rubennj/pvlib-python | docs/sphinx/sphinxext/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/tests/test_pipeline.py | 23 | 15392 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA(svd_solver='full')
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs works for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e351.py | 2 | 6885 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # pool size: 4 adjacent steps pooled together along the chosen axis
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # pool size: 4 adjacent steps pooled together along the chosen axis
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
jblackburne/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in an 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
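# (sketch, not part of the original exercise) after fitting, the winning
# candidate is available through standard GridSearchCV attributes, e.g.:
#   print(grid_search.best_params_)  # e.g. {'vect__ngram_range': (1, 2)}
#   print(grid_search.best_score_)   # mean cross-validated score of the best candidate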
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/datasets/lfw.py | 38 | 19042 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
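Examples
--------
A minimal usage sketch; the exact shapes depend on the ``resize``,
``slice_`` and ``min_faces_per_person`` arguments (the values shown
below are illustrative):
>>> from sklearn.datasets import fetch_lfw_people
>>> lfw_people = fetch_lfw_people(min_faces_per_person=70) # doctest: +SKIP
>>> lfw_people.images.shape # doctest: +SKIP
(1288, 62, 47)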
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
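Examples
--------
A minimal usage sketch; shapes are illustrative and depend on the
``subset``, ``resize`` and ``slice_`` arguments:
>>> from sklearn.datasets import fetch_lfw_pairs
>>> lfw_pairs = fetch_lfw_pairs(subset='train') # doctest: +SKIP
>>> lfw_pairs.pairs.shape # doctest: +SKIP
(2200, 2, 62, 47)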
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 50 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and by SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
cyliustack/sofa | bin/sofa_analyze.py | 1 | 50661 | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
# red channel fixed at 0x40; green and blue randomized for distinct colors
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % (64, rand(), rand())
def get_top_k_events(cfg, df, topk):
topk_events=[]
gby = df.groupby(['name'])
df_agg = gby.aggregate(np.sum)
df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False)
#memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
if cfg.verbose:
print("Top %d Events: "%topk)
print(df_agg_sorted[['duration']][0:topk])
eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist()
return eventName
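# Example (sketch): given a trace DataFrame with 'name' and 'duration'
# columns, get_top_k_events(cfg, df_gpu, 10) prints and returns the names of
# the 10 events with the largest total duration.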
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
if len(features) > 0:
pfv = potato_pb2.PerformanceFeatureVector()
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
#print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
pfv.name.append(name)
pfv.value.append(value)
#print('Wait for response from POTATO server...')
myhostname = socket.gethostname()
channel = grpc.insecure_channel(potato_server)
stub = potato_pb2_grpc.HintStub(channel)
request = potato_pb2.HintRequest( hostname = myhostname,
pfv = pfv)
response = stub.Hint(request)
hint = response.hint
docker_image = response.docker_image
else:
hint = 'There is no pfv to get hints.'
docker_image = 'NA'
return hint, docker_image
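# Example (sketch): querying a POTATO server for a tuning hint. The server
# address and the feature values below are illustrative assumptions only.
#   features = pd.DataFrame({'name': ['gpu_sm_util'], 'value': [35.0]},
#                           columns=['name', 'value'])
#   hint, docker_image = get_hint('localhost:50051', features)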
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
sample_time = (1 / float(cfg.sys_mon_rate))
while t < t_end:
t = t + sample_time
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - sample_time
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_idl = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(sample_time*int(ratios[1])/100.0)
mp_sys.append(sample_time*int(ratios[2])/100.0)
mp_idl.append(sample_time*int(ratios[3])/100.0)
mp_iow.append(sample_time*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_idl = np.asarray(mp_idl)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
else:
total_elapsed_time['idl'] += sample_time
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
mp_idl.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
if num_gpus > 0:
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
def payload_sum(df):
print((len(df)))
class Event:
def __init__(self, name, ttype, timestamp, duration):
self.name = name
self.ttype = ttype # 0 for begin, 1 for end
self.timestamp = timestamp
self.duration = duration
def __repr__(self):
return repr((self.name, self.ttype, self.timestamp, self.duration))
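# Example (sketch): Event instances are plain records; a timeline can be
# ordered with the attrgetter imported above, e.g.
#   sorted(events, key=attrgetter('timestamp'))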
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('SM & MEM & ENCODE/DECODE Profiling')
if cfg.spotlight_gpu:
if cfg.roi_end == 0 :
print_warning(cfg, 'spotlight_gpu has no effect.')
else:
cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
df_nvsmi = df_nvsmi[ cond1 & cond2 ]
sm_start = df_nvsmi.iloc[0].timestamp
sm_end = df_nvsmi.iloc[-1].timestamp
SM_time = sm_end - sm_start
result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
result = result.astype(int)
gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
if cfg.nvsmi_data:
gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
else:
gpu_enc_util = 0
gpu_dec_util = 0
sm = df_nvsmi['event'] == int(0)
mem = df_nvsmi['event'] == int(1)
enc = df_nvsmi['event'] == int(2)
dec = df_nvsmi['event'] == int(3)
gpunum = list(set(df_nvsmi['deviceId']))
res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
for i in gpunum:
gpuid = df_nvsmi['deviceId'] == int(i)
gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
res = pd.concat([res, gpu_tmp])
sm_q = pd.concat([sm_q, sm_tmp])
mem_q = pd.concat([mem_q, mem_tmp])
res.index.name = 'gpu_id'
sm_q.index.name = 'gpu_id'
mem_q.index.name = 'gpu_id'
if not cfg.cluster_ip and cfg.verbose:
print('GPU Utilization (%):')
print(res)
print('\nGPU SM Quartile (%):')
print(sm_q)
print('\nGPU MEM Quartile (%):')
print(mem_q)
print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
int(gpu_sm_util),
df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
int(gpu_mem_util),
]},
columns=['name','value'])
features = pd.concat([features, df])
return features
def gpu_profile(logdir, cfg, df_gpu, features):
if cfg.verbose:
print_title('GPU Profiling')
print('Per-GPU time (s):')
groups = df_gpu.groupby("deviceId")["duration"]
gpu_time = 0
for key, item in groups:
gpuid = int(float(key))
per_gpu_time = groups.get_group(key).sum()
if cfg.verbose:
print("[%d]: %lf" % (gpuid, per_gpu_time))
gpu_time = gpu_time + per_gpu_time
num_gpus = len(groups)
kernel_time = 0
grouped_df = df_gpu.groupby("copyKind")["duration"]
for key, item in grouped_df:
if key == 0:
kernel_time = grouped_df.get_group(key).sum()
nccl_time = 0
grouped_df = df_gpu.groupby("name")["duration"]
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("nccl") != -1:
nccl_time = nccl_time + grouped_df.get_group(key).sum()
features = comm_profile(logdir, cfg, df_gpu, features)
get_top_k_events(cfg, df_gpu, 10)
df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def strace_profile(logdir, cfg, df, features):
print_title('STRACE Profiling:')
return features
def net_profile(logdir, cfg, df, features):
if not cfg.cluster_ip:
print_title("Network Profiling:")
grouped_df = df.groupby("name")["duration"]
net_time = 0
n_packets = 0
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("network:tcp:") != -1:
net_time = net_time + grouped_df.get_group(key).sum()
n_packets = n_packets + 1
#print(("total network time (s) = %.3lf" % net_time))
#print(("total amount of network packets = %d" % n_packets))
# total number of network packets
packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
# total network traffic
packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
# ================ change pandas table columns and index name ====
rename_index = packet_sum_matrix.index.tolist()
rename_index2 = packet_num_matrix.index.tolist()
rename_columns = packet_sum_matrix.columns.tolist()
rename_columns2 = packet_num_matrix.columns.tolist()
# helpers below rebuild dotted-quad IPv4 strings from integer-encoded addresses
def zero(s):
if s[0:2] == '00':
s = s[2]
elif (s[0] == '0') and (s[1] != '0'):
s = s[1:3]
return(s)
def check_str(rename_list):
rename_list_new = []
for j in rename_list:
j = str(int(j))
a = j[-9:-6]
b = j[-6:-3]
c = j[-3:]
j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_list_new.append(j)
return(rename_list_new)
def check_str2(rename_list):
rename_columns_2 = []
for i in rename_list:
i = str(int(i[0]))
a = i[-9:-6]
b = i[-6:-3]
c = i[-3:]
i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_columns_2.append(i)
return(rename_columns_2)
rename_index_new = check_str(rename_index)
rename_index_new = dict(zip(rename_index, rename_index_new))
rename_index2_new = check_str2(rename_index2)
rename_index2_final = list(set(rename_index2_new))
rename_index2_final.sort(key=rename_index2_new.index)
rename_columns_new = check_str(rename_columns)
rename_columns_new = dict(zip(rename_columns, rename_columns_new))
rename_columns2_new = check_str(rename_columns2)
rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
# rename here
packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
if cfg.verbose:
print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
network_value = []
src = []
dst = []
final = []
for index in packet_sum_matrix.index:
for column in packet_sum_matrix.columns:
src.append(index)
dst.append(column)
network_value.append(packet_sum_matrix[column][index])
record = list(zip(src, dst, network_value))
record.sort(key=lambda tup:tup[2], reverse=True)
for src, dst, value in record:
if value == 0:
pass
else:
item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
final.append(item)
summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary.to_csv(logdir + 'netrank.csv',
mode='w',
header=True,
index=False)
df = pd.DataFrame({'name':['net_time'],
'value':[net_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def convertbyte(B):
B = int(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{} Bytes'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
def convertbytes(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0:.2f} B/s'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB/s'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB/s'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB/s'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB/s'.format(B/TB)
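# Example (sketch): these helpers pretty-print raw byte counters, e.g.
#   convertbyte(1536) -> '1.50 KB'
#   convertbytes(2 * 1024 ** 2) -> '2.00 MB/s'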
def netbandwidth_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('Network Bandwidth Profiling:')
tx = df['event'] == float(0)
rx = df['event'] == float(1)
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
first_line = lines[0]
last_line = lines[-1]
tx_begin = first_line.split(',')[1]
rx_begin = first_line.split(',')[2]
tx_end = last_line.split(',')[1]
rx_end = last_line.split(',')[2]
tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
if not cfg.cluster_ip:
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
if cfg.verbose:
print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
print('Amount of tx : %s' % convertbyte(tx_amount))
print('Amount of rx : %s' % convertbyte(rx_amount))
print('Bandwidth Quartile :')
print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
#network chart part
all_time = df[tx]['timestamp'].tolist()
all_tx = df[tx]['bandwidth'].tolist()
all_rx = df[rx]['bandwidth'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
plt.legend(loc='upper right')
plt.title("Network Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Bandwidth (bytes)", fontsize=16)
fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
if not cfg.cluster_ip and cfg.verbose:
print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def blktrace_latency_profile(logdir, cfg, df, features):
with open('%s/btt.txt' % logdir) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '==================== All Devices ====================' in line:
start = i
if '==================== Device Merge Information ====================' in line:
end = i
break
bttoutput_result = lines[start:end]
df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
time = df_offset['time'].tolist()
start_b = df_offset['start'].tolist()
end_b = df_offset['end'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
plt.legend(loc='upper right')
plt.title("Block Offset Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Block Number", fontsize=16)
fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
if cfg.verbose:
print_title('Storage Profiling:')
print('Blktrace Latency (s):')
for btt in bttoutput_result:
print(btt[:-1])
blktrace_latency = df['event'] == 'C'
blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
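# Illustrative sketch (not part of the original tool): blktrace_latency_profile()
# above keeps only the btt.txt lines from the "All Devices" header up to (but
# excluding) the "Device Merge Information" header. The same slice on toy input:
def _demo_btt_section():
    lines = ['preamble',
             '==================== All Devices ====================',
             'payload 1',
             'payload 2',
             '==================== Device Merge Information ====================',
             'trailer']
    start = next(i for i, l in enumerate(lines) if 'All Devices' in l)
    end = next(i for i, l in enumerate(lines) if 'Device Merge Information' in l)
    return lines[start:end]  # header line plus payload, trailer excluded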
def diskstat_profile(logdir, cfg, df, features):
#diskstat_dev = list(set(df['dev']))
diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
diskstat_r_avg = df.groupby('dev')['d_read'].mean()
diskstat_w_avg = df.groupby('dev')['d_write'].mean()
diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
diskstat_iops = df.groupby('dev')['iops'].mean()
diskstat_wait = df.groupby('dev')['await_time'].mean()
diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
diskstat_r_iops, diskstat_w_iops, diskstat_iops,
diskstat_wait], axis=1, sort=False)
diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
diskstat_table.columns = diskstat_columns
    diskstat_dev = diskstat_table.index.tolist()
final_table = pd.DataFrame(columns=diskstat_columns)
for j, dev in enumerate(diskstat_dev):
tmp_list = []
for i in diskstat_columns[:-4]:
tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
for i in diskstat_columns[-4:-1]:
tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1])
tmp_table = pd.DataFrame([tuple(tmp_list)],
columns=diskstat_columns,
index=[dev])
final_table = pd.concat([final_table, tmp_table])
if cfg.verbose:
print_title('DISKSTAT Profiling:')
print('Disk Throughput Quartile :')
print(final_table.T)
df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def cpu_profile(logdir, cfg, df):
if cfg.verbose:
print_title('CPU Profiling:')
print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
grouped_df = df.groupby("deviceId")["duration"]
total_exec_time = 0
for key, item in grouped_df:
print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum())))
total_exec_time = total_exec_time + grouped_df.get_group(key).sum()
print("total execution time (s) = %.3lf" % total_exec_time)
cpu_detail_profile_df = df[['timestamp','duration','name']]
cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False)
cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100
cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']]
print(cpu_detail_profile_df[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
    split_cols = df['name'].str.split('|', expand=True)
    df['si'], df['so'], df['bi'], df['bo'], df['in'], df['cs'] = \
        (split_cols[i] for i in range(6, 12))
for col_name in ('si','so','bi','bo','in','cs'):
df[col_name] = df[col_name].str[3:]
vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float)
vm_bi = vmstat_traces['bi'].mean()
vm_bo = vmstat_traces['bo'].mean()
vm_cs = vmstat_traces['cs'].mean()
vm_in = vmstat_traces['in'].mean()
if cfg.verbose:
print_title('VMSTAT Profiling:')
        print('average bi/s: %d' % int(vm_bi))
        print('average bo/s: %d' % int(vm_bo))
        print('average cs/s: %d' % int(vm_cs))
        print('average in/s: %d' % int(vm_in))
df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
'value':[vm_bi, vm_bo, vm_cs, vm_in] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
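# Illustrative sketch (not part of the original tool): vmstat_profile() above
# splits each '|'-delimited trace record into columns and strips a 3-character
# label prefix (e.g. 'si=') before casting to float. End-to-end on made-up
# records with the si field at column 6, as in the real trace:
def _demo_vmstat_parse():
    demo = pd.DataFrame({'name': ['x|x|x|x|x|x|si=1|so=2|bi=3|bo=4|in=5|cs=6|x|x|x|x|x',
                                  'x|x|x|x|x|x|si=3|so=4|bi=5|bo=6|in=7|cs=8|x|x|x|x|x']})
    cols = demo['name'].str.split('|', expand=True)
    si = cols[6].str[3:].astype(float)  # drop the 'si=' prefix, keep the number
    return si.mean()                    # -> 2.0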
def mpstat_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('MPSTAT Profiling:')
num_cores = int(df['deviceId'].max() + 1)
df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
    split_cols = df["name"].str.split('|', expand=True)
    df['USR'], df['SYS'], df['IDL'], df['IOW'], df['IRQ'] = \
        (split_cols[i] for i in range(5, 10))
df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
df["t_USR"] = df['dt_all'] * df['USR']/100.0
df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
dfs=[]
for i in range(num_cores):
dfs.append(df.loc[df['deviceId'] == float(i)])
for index,dff in enumerate(dfs):
        df_summary.loc[index, 'USR'] = dff['t_USR'].sum()
        df_summary.loc[index, 'SYS'] = dff['t_SYS'].sum()
        df_summary.loc[index, 'IDL'] = dff['t_IDL'].sum()
        df_summary.loc[index, 'IRQ'] = dff['t_IRQ'].sum()
        df_summary.loc[index, 'IOW'] = dff['t_IOW'].sum()
if not cfg.cluster_ip and cfg.verbose:
print('CPU Utilization (%):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
int(100.0*df_summary.iloc[i]['SYS']/t_sum),
int(100.0*df_summary.iloc[i]['IDL']/t_sum),
int(100.0*df_summary.iloc[i]['IOW']/t_sum),
int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
if not cfg.cluster_ip and cfg.verbose:
print('CPU Time (s):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
df_summary.iloc[i]['USR'],
df_summary.iloc[i]['SYS'],
df_summary.iloc[i]['IDL'],
df_summary.iloc[i]['IOW'],
df_summary.iloc[i]['IRQ'] ))
total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
if not cfg.cluster_ip and cfg.verbose:
print('Active CPU Time (s): %.3lf' % total_cpu_time)
print('Active CPU ratio (%%): %3d' % cpu_util)
df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
'value':[num_cores, cpu_util] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
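# Illustrative sketch (not part of the original tool): mpstat_profile() above
# reconstructs the wall-clock sampling interval from the measured busy duration
# and the idle percentage: dt_all = duration / ((100 - IDL) / 100). For
# example, 0.5 s of accounted time at 50% idle implies a 1.0 s interval:
def _demo_dt_all(duration=0.5, idl=50.0):
    return 0.1 if idl == 100 else duration / ((100.0 - idl) / 100.0)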
def sofa_analyze(cfg):
print_main_progress('SOFA analyzing...')
filein = []
df_cpu = pd.DataFrame([], columns=cfg.columns)
df_gpu = pd.DataFrame([], columns=cfg.columns)
df_net = pd.DataFrame([], columns=cfg.columns)
df_mpstat = pd.DataFrame([], columns=cfg.columns)
df_vmstat = pd.DataFrame([], columns=cfg.columns)
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
df_blktrace = pd.DataFrame([], columns=cfg.columns)
df_diskstat = pd.DataFrame([], columns=cfg.columns)
df_nvsmi = pd.DataFrame([], columns=cfg.columns)
iter_summary = None
logdir = cfg.logdir
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
filein_gpu = logdir + "gputrace.csv"
filein_cpu = logdir + "cputrace.csv"
filein_net = logdir + "nettrace.csv"
filein_vmstat = logdir + "vmstat.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_strace = logdir + "strace.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
filein_blktrace = logdir + "blktrace.csv"
filein_diskstat = logdir + "diskstat_vector.csv"
if os.path.isfile('%s/nvlink_topo.txt' % logdir):
with open(logdir + 'nvlink_topo.txt') as f:
lines = f.readlines()
if len(lines) > 0:
title = lines[0]
num_gpus = 1
for word in title.split():
                if re.match(r'GPU', word) is not None:
num_gpus = num_gpus + 1
print_info(cfg,'# of GPUs: ' + str(num_gpus) )
edges = []
if len(lines) >= num_gpus+1:
for i in range(num_gpus):
connections = lines[1+i].split()
for j in range(len(connections)):
if connections[j] == 'NV1' or connections[j] == 'NV2':
edges.append((i,j-1))
#print('%d connects to %d' % (i, j-1))
ring_found = False
G = nx.DiGraph(edges)
# Try to find ring with its length of num_gpus
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus:
if cfg.verbose:
print('One of the recommended ring having length of %d' % len(cycle))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Try to find ring with its length of num_gpus/2
if not ring_found:
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus/2:
print(("One of the recommended ring having length of %d" % len(cycle) ))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Construct Performance Features
features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
if not df_nvsmi.empty and cfg.spotlight_gpu:
state = 0
sm_high = 0
trigger = 10
for i in range(len(df_nvsmi)):
if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
if df_nvsmi.iloc[i].duration >= 50:
sm_high = min(trigger, sm_high + 1)
if df_nvsmi.iloc[i].duration < 10:
sm_high = max(0, sm_high - 1)
if state == 0 and sm_high == trigger:
state = 1
cfg.roi_begin = df_nvsmi.iloc[i].timestamp
elif state == 1 and sm_high == 0:
state = 0
cfg.roi_end = df_nvsmi.iloc[i].timestamp
#print('sm_high=%d state=%d' % (sm_high, state))
if cfg.roi_end - cfg.roi_begin < 0:
cfg.roi_end = 0
cfg.roi_begin = 0
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_cpu = pd.read_csv(filein_cpu)
if not df_cpu.empty:
if cfg.verbose:
cpu_profile(logdir, cfg, df_cpu)
if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
df_cpu, swarms = hsg_v2(cfg, df_cpu)
except IOError as e:
df_cpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_cpu)
try:
df_strace = pd.read_csv(filein_strace)
if not df_strace.empty:
features = strace_profile(logdir, cfg, df_strace, features)
except IOError as e:
df_strace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_strace)
try:
df_net = pd.read_csv(filein_net)
if not df_net.empty:
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
if not df_bandwidth.empty:
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
try:
df_blktrace = pd.read_csv(filein_blktrace)
if not df_blktrace.empty:
features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
except IOError as e:
df_blktrace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_blktrace)
try:
df_diskstat = pd.read_csv(filein_diskstat)
if not df_diskstat.empty:
features = diskstat_profile(logdir, cfg, df_diskstat, features)
except IOError as e:
df_diskstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_diskstat)
try:
df_vmstat = pd.read_csv(filein_vmstat)
if not df_vmstat.empty:
features = vmstat_profile(logdir, cfg, df_vmstat, features)
except IOError as e:
df_vmstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_vmstat)
try:
df_mpstat = pd.read_csv(filein_mpstat)
if not df_mpstat.empty:
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_gpu = pd.read_csv(filein_gpu)
if not df_gpu.empty:
features = gpu_profile(logdir, cfg, df_gpu, features)
except IOError:
df_gpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)
try:
if len(df_mpstat)>0:
            df_nvsmi = df_nvsmi.append(df_mpstat.iloc[0])
features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
except IOError as e:
print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")
if cfg.enable_aisi:
selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)
if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
print_title('Final Performance Features')
print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
if cfg.spotlight_gpu:
try:
                print('Elapsed hotspot time: %.3lf' % float(features[features.name=='elapsed_hotspot_time'].value))
            except Exception:
                print_warning(cfg, 'elapsed_hotspot_time is not defined.')
if cfg.potato_server:
if cfg.potato_server.find(':') == -1:
cfg.potato_server = cfg.potato_server + ':50051'
hint, docker_image = get_hint(cfg.potato_server, features)
df_report = pd.read_json(hint, orient='table')
file_potato_report = cfg.logdir + 'potato_report.html'
# Export report to HTML file.
df_report.to_html(file_potato_report )
with open(file_potato_report, 'a') as f:
f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')
print_title('POTATO Feedback')
print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
value = df_report.iloc[i]['Value']
ref_value = df_report.iloc[i]['ReferenceValue']
print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))
print('\n')
print_hint('General Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
print('\n')
print_hint('Framework-specific Optimization Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric == 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
#print(df_report[['Metric', 'Value', 'Reference Value']])
#print(df_report[['Suggestion']])
#print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
print('\n')
print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.')
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print('\n\n')
print('Complete!!')
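# Illustrative sketch (not part of the original tool): the cfg.spotlight_gpu
# block in sofa_analyze() detects a region of interest with a saturating-counter
# hysteresis. The same idea on plain (timestamp, sm_util) samples; the default
# thresholds mirror the constants used above:
def _demo_roi_hysteresis(samples, trigger=10, high=50, low=10):
    state, sm_high, roi_begin, roi_end = 0, 0, 0, 0
    for ts, util in samples:
        if util >= high:
            sm_high = min(trigger, sm_high + 1)     # count up while the GPU is busy
        if util < low:
            sm_high = max(0, sm_high - 1)           # decay while it is idle
        if state == 0 and sm_high == trigger:
            state, roi_begin = 1, ts                # sustained activity: ROI starts
        elif state == 1 and sm_high == 0:
            state, roi_end = 0, ts                  # sustained idleness: ROI ends
    return roi_begin, roi_end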
def cluster_analyze(cfg):
if cfg.verbose:
print_title('Cluster Network Profiling :')
cluster = cfg.cluster_ip.split(',')
summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
all = []
for i, ip in enumerate(cluster):
features = pd.DataFrame({'name':['elapsed_time'],
'value':[cfg.elapsed_time]},
columns=['name','value'])
node = 'node ' + str(i)
if cfg.verbose:
print('node ' + str(i) + ' is ' + ip)
logdir = tmp_dir[0:-1] + '-' + ip + '/'
filein_net = logdir + "nettrace.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
try:
df_net = pd.read_csv(filein_net)
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_mpstat = pd.read_csv(filein_mpstat)
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
sm = int(features[features['name'] == 'gpu_sm_util']['value'])
mem = int(features[features['name'] == 'gpu_mem_util']['value'])
cpu = int(features[features['name'] == 'cpu_util']['value'])
sm_mem_cpu = [sm, mem, cpu]
compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
net_tmp = pd.read_csv(logdir + "netrank.csv")
summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
# for bandwidth report
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
band_tmp = pd.concat([band_tmp, rx_pd])
summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
if cfg.verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print('Ranked Network Traffic : \n', summary_net, '\n')
print('Cluster Bandwidth Quartile: \n', summary_band)
print_title('Cluster Computation Profiling:')
print(summary_compute)
| apache-2.0 |
thp44/delphin_6_automation | data_process/2d_1d/archieve/temperature.py | 1 | 18075 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import matplotlib.dates as mdates
import pandas as pd
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Application
colors = {'top': '#FBBA00', 'mid': '#B81A5D', 'bottom': '#79C6C0', '1d_brick': '#000000', '1d_mortar': '#BDCCD4'}
project_dict = {'dresden_zp_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0352e2cb22f2c4f15b4': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb0a102e2cb22f2c4f17e9': '2d'}
},
'dresden_zd_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0ba2e2cb22f2c4f15f1': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb2dc02e2cb22f2c4f1873': '2d'}
},
'potsdam_high_ratio_uninsulated_4a':
{'map':
{'5ad9e3462e2cb22f2c4f162e': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adcc9702e2cb22f2c4f18fd': '2d'}
},
'dresden_zp_low_ratio_uninsulated_4a':
{'map':
{'5ad9e6192e2cb22f2c4f175f': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adda7172e2cb20baca57c6e': '2d'}
},
'dresden_zd_low_ratio_uninsulated_4a':
{'map':
{'5ad9e44f2e2cb22f2c4f16a8': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adcd4402e2cb22f2c4f1987': '2d'}
},
'potsdam_low_ratio_uninsulated_4a':
{'map': {'5ad9e4f22e2cb22f2c4f16e5': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5add9b902e2cb20baca57be4': '2d'}
},
'dresden_zp_high_ratio_insulated_4a':
{'map': {'5ae824252e2cb22d48db5955': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae355cf2e2cb2201055c1a4': '2d'}
},
'dresden_zd_high_ratio_insulated_4a':
{'map': {'5ae824d82e2cb22d48db5998': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae398f12e2cb2201055c263': '2d'}
},
'potsdam_high_ratio_insulated_4a':
{'map':
{'5ae82bac2e2cb21560008fe8': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae6ca982e2cb2201055c322': '2d'}
},
'dresden_zp_low_ratio_insulated_4a':
{'map':
{'5ae82e5d2e2cb21560009137': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6fdbf2e2cb20d5891272f': '2d'}
},
'dresden_zd_low_ratio_insulated_4a':
{'map':
{'5ae82cb12e2cb2156000906e': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6d9bf2e2cb2201055c3e1': '2d'}
},
'potsdam_low_ratio_insulated_4a':
{'map':
{'5ae82d3b2e2cb215600090b1': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6edaf2e2cb20d58912670': '2d'}
},
}
result_folder = r'U:\RIBuild\2D_1D\Results'
files = ['temperature profile.d6o']
# Functions
def get_points(result: dict, geo: dict):
points = []
for index_ in result['indices']:
x_ = geo['element_geometry'][index_][1]
y_ = geo['element_geometry'][index_][2]
points.append({'cell': index_, 'x': x_, 'y': y_})
return points
def add_data_to_points(points: list, results: dict, result_name: str):
for cell_ in results['result'].keys():
cell_index = int(cell_.split('_')[1])
for point in points:
if point['cell'] == cell_index:
point[result_name] = np.array(results['result'][cell_][8760:])
break
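# Illustrative sketch (not part of the pipeline below): get_points() and
# add_data_to_points() turn a parsed result dict plus a geometry dict into a
# list of {'cell', 'x', 'y', <quantity>} records. With minimal made-up inputs:
def _demo_points():
    geo = {'element_geometry': {7: (None, 0.12, 0.05)}}
    result = {'indices': [7], 'result': {'cell_7': list(range(8760 + 3))}}
    pts = get_points(result, geo)
    add_data_to_points(pts, result, 'temperature')
    return pts  # [{'cell': 7, 'x': 0.12, 'y': 0.05, 'temperature': array([8760, 8761, 8762])}]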
def main(project_):
projects = list(project_dict[project_]['map'].keys())
parsed_dicts = {'brick_1d': {'temp': {}, 'geo': {}},
'mortar_1d': {'temp': {}, 'geo': {}},
'2d': {'temp': {}, 'geo': {}}, }
for p_ in projects:
for mp_key in project_dict[project_]['map'].keys():
if p_ == mp_key:
key = project_dict[project_]['map'][mp_key]
folder = result_folder + f'/{p_}/results'
geo_file = [file
for file in os.listdir(folder)
if file.endswith('.g6a')][0]
parsed_dicts[key]['temp'], _ = delphin_parser.d6o_to_dict(folder, files[0])
parsed_dicts[key]['geo'] = delphin_parser.g6a_to_dict(folder, geo_file)
x_date = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
for i in range(len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]))]
# Brick 1D
brick_1d = get_points(parsed_dicts['brick_1d']['temp'], parsed_dicts['brick_1d']['geo'])
brick_1d.sort(key=lambda point: point['x'])
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['temp'], 'temperature')
# Mortar 1D
mortar_1d = get_points(parsed_dicts['mortar_1d']['temp'], parsed_dicts['mortar_1d']['geo'])
mortar_1d.sort(key=lambda point: point['x'])
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['temp'], 'temperature')
# 2D
sim_2d = get_points(parsed_dicts['2d']['temp'], parsed_dicts['2d']['geo'])
sim_2d.sort(key=lambda point: (point['x'], point['y']))
add_data_to_points(sim_2d, parsed_dicts['2d']['temp'], 'temperature')
# Plots
def plot_locations(quantity):
# Axes 00
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[0]['x']:.4f} and 2D-Location: {sim_2d[0]['x']:.4f}")
plt.plot(x_date, brick_1d[0][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[0][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[0][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[1][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[2][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 01
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[1]['x']:.4f} and 2D-Location: {sim_2d[3]['x']:.4f}")
plt.plot(x_date, brick_1d[1][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[1][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[3][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[4][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[5][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 10
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[2]['x']:.4f} and 2D-Location: {sim_2d[6]['x']:.4f}")
plt.plot(x_date, brick_1d[2][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[2][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[6][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[7][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[8][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 11
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[3]['x']:.4f} and 2D-Location: {sim_2d[9]['x']:.4f}")
plt.plot(x_date, brick_1d[3][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[3][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[9][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[10][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[11][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 20
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[4]['x']:.4f} and 2D-Location: {sim_2d[12]['x']:.4f}")
plt.plot(x_date, brick_1d[4][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[4][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[12][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[13][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[14][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 21
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[5]['x']:.4f} and 2D-Location: {sim_2d[15]['x']:.4f}")
plt.plot(x_date, brick_1d[5][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[5][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[15][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[16][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[17][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
#plot_locations(quantity='temperature')
#plt.show()
def abs_diff(x1, x2):
return x2 - x1
def rel_diff(x1, x2):
return (abs(x2 - x1))/abs(x2) * 100
def differences(i, plots=False):
        # Average the bottom, mid and top 2D cells belonging to this 1D location
        avg_2d = np.mean([sim_2d[i*3]['temperature'],
                          sim_2d[i*3+1]['temperature'],
                          sim_2d[i*3+2]['temperature']], axis=0)
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('C')
plt.figure()
plt.title(f"Temperature - Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
                                index=pd.date_range(start=datetime.datetime(2020, 1, 1),
                                                    freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
def differences_weighted(i, plots=False):
        # Weighted average of the bottom, mid and top 2D cells at this location
        avg_2d = np.average(a=[sim_2d[i*3]['temperature'],
                               sim_2d[i*3+1]['temperature'],
                               sim_2d[i*3+2]['temperature']],
                            axis=0,
                            weights=[56, 24., 56])
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Weighted Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
            plt.ylabel('C')
plt.figure()
plt.title(f"Temperature - Weighted Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
                                index=pd.date_range(start=datetime.datetime(2020, 1, 1),
                                                    freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
dataframes = []
weighted_dataframes = []
for index in range(len(brick_1d)):
dataframes.append(differences(index))
weighted_dataframes.append(differences_weighted(index))
#plt.show()
result_dataframe = pd.concat(dataframes, axis=1)
w_result_dataframe = pd.concat(weighted_dataframes, axis=1)
absolute_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
absolute_df.columns = absolute_df.columns.droplevel(level=2)
relative_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
relative_df.columns = relative_df.columns.droplevel(level=2)
w_absolute_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
w_absolute_df.columns = w_absolute_df.columns.droplevel(level=2)
w_relative_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
w_relative_df.columns = w_relative_df.columns.droplevel(level=2)
plt.figure()
ax = absolute_df.boxplot()
ax.set_ylim(-20, 20)
ax.set_ylabel('Temperature - C')
ax.set_title('Absolute Differences')
#plt.show()
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
def excel():
writer = pd.ExcelWriter(out_folder + '/temperature.xlsx')
relative_df.describe().to_excel(writer, 'relative')
absolute_df.describe().to_excel(writer, 'absolute')
writer.save()
#excel()
def save_relative():
hdf_file = out_folder + '/relative_temperature.h5'
w_relative_df.to_hdf(hdf_file, project_, append=True)
save_relative()
for project_key in project_dict.keys():
print(f'Processing {project_key}')
main(project_key) | mit |
miyyer/qb | qanta/hyperparam.py | 2 | 1848 | import copy
import json
import yaml
from sklearn.model_selection import ParameterGrid
def expand_config(base_file, hyper_file, output_file):
"""
This is useful for taking the qanta.yaml config, a set of values to try for different hyper parameters, and
generating a configuration representing each value in the parameter sweep
"""
with open(base_file) as f:
        base_conf = yaml.safe_load(f)
with open(hyper_file) as f:
        hyper_conf = yaml.safe_load(f)
all_base_guessers = base_conf["guessers"]
final_guessers = {}
for guesser, params in hyper_conf["parameters"].items():
base_guesser_conf = all_base_guessers[guesser]
if len(base_guesser_conf) != 1:
raise ValueError(
"More than one configuration for parameter tuning base is invalid"
)
base_guesser_conf = base_guesser_conf[0]
parameter_set = set(base_guesser_conf.keys()) | set(params.keys())
param_grid = {}
for p in parameter_set:
if p in params:
param_grid[p] = params[p]
else:
param_grid[p] = [base_guesser_conf[p]]
parameter_list = list(ParameterGrid(param_grid))
final_guessers[guesser] = parameter_list
final_conf = copy.deepcopy(base_conf)
for g in final_conf["guessers"]:
if g in final_guessers:
final_conf["guessers"][g] = copy.deepcopy(final_guessers[g])
# There is a bug in yaml.dump that doesn't handle outputting nested dicts/arrays correctly. I didn't want to debug
# So instead output to json then convert that to yaml
with open("/tmp/qanta-tmp.json", "w") as f:
json.dump(final_conf, f)
with open("/tmp/qanta-tmp.json") as f:
conf = json.load(f)
with open(output_file, "w") as f:
yaml.dump(conf, f)
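# Illustrative usage (file names are hypothetical): expand a base qanta.yaml and
# a hyper.yaml of per-guesser parameter value lists into one guesser
# configuration per point of the parameter grid.
if __name__ == "__main__":
    expand_config("qanta.yaml", "hyper.yaml", "qanta-expanded.yaml")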
| mit |
andnovar/ggplot | ggplot/scales/scale_colour_gradient.py | 12 | 2017 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter
def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
return [rgb2hex(cmap(bb)[:3]) for bb in breaks]
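# Illustrative sketch: sampling any registered matplotlib colormap at the
# default quartile breaks yields the five hex stops that __radd__ below stores
# on the ggplot object for its own two/three-point gradients.
def _demo_colors_at_breaks():
    return colors_at_breaks(plt.cm.get_cmap('Blues'))  # five '#rrggbb' strings, low to high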
class scale_colour_gradient(scale):
"""
Specify a two- or three-point gradient.
Parameters
----------
name : Name of an existing gradient scheme
limits : list of the upper and lower bounds of the gradient
low : colour at the lower bound of the gradient
mid : colour at the middle of the gradient
high : Colour at the upper bound of the gradient
Examples
--------
>>> from ggplot import *
    >>> diamonds_premium = diamonds[diamonds.cut=='Premium']
    >>> gg = ggplot(diamonds_premium, aes(x='depth', y='carat', colour='price')) + \\
... geom_point()
>>> print(gg + scale_colour_gradient(low='red', mid='white', high='blue', limits=[4000,6000]) + \\
... ggtitle('With red-blue gradient'))
>>> print(gg + ggtitle('With standard gradient'))
"""
VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.color_label = self.name
        if self.limits is not None:
gg.color_limits = self.limits
color_spectrum = []
if self.low:
color_spectrum.append(self.low)
if self.mid:
color_spectrum.append(self.mid)
if self.high:
color_spectrum.append(self.high)
if self.low and self.high:
gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
plt.cm.register_cmap(cmap=gradient2n)
# add them back to ggplot
gg.color_scale = colors_at_breaks(gradient2n)
gg.colormap = gradient2n
return gg
| bsd-2-clause |
DistrictDataLabs/django-data-product | irisfinder/views.py | 1 | 1948 | from django.shortcuts import render
import datetime
from models import Iris, SVMModels
from forms import UserIrisData
import sklearn
from sklearn import svm
from sklearn.cross_validation import train_test_split
import numpy as np
from django.conf import settings
import cPickle
import scipy
from pytz import timezone
import random
# Create your views here.
def predict(request):
data = {
"app_name": "irisfinder",
"random_number": random.randint(0, 10000)
}
if request.method == "GET":
form = UserIrisData()
data.update({"form": form, "submit": True})
elif request.method == "POST":
form = UserIrisData(request.POST)
sepal_length = request.POST.get("sepal_length")
sepal_width = request.POST.get("sepal_width")
petal_length = request.POST.get("petal_length")
petal_width = request.POST.get("petal_width")
if request.POST.get('submit'):
user_data = Iris(user_data=True,
sepal_length=sepal_length,
sepal_width=sepal_width,
petal_length=petal_length,
petal_width=petal_width)
user_data.save()
model_object = SVMModels.objects.order_by("-run_date").first()
model = cPickle.loads(model_object.model_pickle)
            prediction = model.predict([[float(sepal_length), float(sepal_width),
                                         float(petal_length), float(petal_width)]])
item_pk = user_data.pk
species = prediction[0]
data.update({"form": form, "verify": True, "item_pk": item_pk,
"species": species, "prediction": prediction[0]})
elif request.POST.get('verified'):
user_data = Iris.objects.get(pk=int(request.POST.get("item_pk")))
user_data.species = request.POST.get("species")
user_data.save()
return render(request, "predict.html", context=data) | apache-2.0 |
LaboratoireMecaniqueLille/crappy | crappy/blocks/grapher.py | 1 | 5641 | # coding: utf-8
import numpy as np
from .block import Block
from .._global import OptionalModule
try:
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
except (ModuleNotFoundError, ImportError):
plt = OptionalModule("matplotlib")
Button = OptionalModule("matplotlib")
class Grapher(Block):
"""The grapher receive data from a block (via a :ref:`Link`) and plots it."""
def __init__(self,
*labels,
length=0,
freq=2,
maxpt=20000,
window_size=(8, 8),
window_pos=None,
interp=True,
backend="TkAgg"):
"""Sets the args and initializes the parent class.
Args:
*labels (:obj:`tuple`): Tuples of the columns labels of input data for
plotting. You can add as much as you want, depending on your
performances. The first value is the `x` label, the second is the `y`
label.
length (:obj:`int`, optional): If `0` the graph is static and displays
all data from the start of the assay. Else only displays the last
``length`` received chunks, and drops the previous ones.
freq (:obj:`float`, optional): The refresh rate of the graph. May cause
high memory consumption if set too high.
maxpt (:obj:`int`, optional): The maximum number of points displayed on
the graph. When reaching this limit, the block deletes one point out of
two but this is almost invisible to the user.
window_size (:obj:`tuple`, optional): The size of the graph, in inches.
window_pos (:obj:`tuple`, optional): The position of the graph in pixels.
The first value is for the `x` direction, the second for the `y`
direction. The origin is the top left corner. Works with multiple
screens.
      interp (:obj:`bool`, optional): If :obj:`True`, the data points will
        be joined by straight lines. Else, each value will be
        displayed as constant until the next update.
      backend (:obj:`str`, optional): The :mod:`matplotlib` backend to use.
Example:
::
graph = Grapher(('t(s)', 'F(N)'), ('t(s)', 'def(%)'))
will plot a dynamic graph with two lines plot (`F=f(t)` and `def=f(t)`).
::
graph = Grapher(('def(%)', 'F(N)'), length=0)
will plot a static graph.
::
graph = Grapher(('t(s)', 'F(N)'), length=30)
will plot a dynamic graph displaying the last 30 chunks of data.
"""
Block.__init__(self)
self.niceness = 10
self.length = length
self.freq = freq
self.maxpt = maxpt
self.window_size = window_size
self.window_pos = window_pos
self.interp = interp
self.backend = backend
self.labels = labels
def prepare(self):
if self.backend:
plt.switch_backend(self.backend)
self.f = plt.figure(figsize=self.window_size)
self.ax = self.f.add_subplot(111)
self.lines = []
for _ in self.labels:
if self.interp:
self.lines.append(self.ax.plot([], [])[0])
else:
self.lines.append(self.ax.step([], [])[0])
# Keep only 1/factor points on each line
self.factor = [1 for _ in self.labels]
# Count to drop exactly 1/factor points, no more and no less
self.counter = [0 for _ in self.labels]
legend = [y for x, y in self.labels]
plt.legend(legend, bbox_to_anchor=(-0.03, 1.02, 1.06, .102), loc=3,
ncol=len(legend), mode="expand", borderaxespad=1)
plt.xlabel(self.labels[0][0])
plt.ylabel(self.labels[0][1])
plt.grid()
self.axclear = plt.axes([.8, .02, .15, .05])
self.bclear = Button(self.axclear, 'Clear')
self.bclear.on_clicked(self.clear)
if self.window_pos:
mng = plt.get_current_fig_manager()
mng.window.wm_geometry("+%s+%s" % self.window_pos)
plt.draw()
plt.pause(.001)
def clear(self, event=None):
for line in self.lines:
line.set_xdata([])
line.set_ydata([])
self.factor = [1 for _ in self.labels]
self.counter = [0 for _ in self.labels]
def loop(self):
# We need to recv data from all the links, but keep
# ALL of the data, even with the same label (so not get_all_last)
data = self.recv_all_delay()
for i, (lx, ly) in enumerate(self.labels):
x, y = 0, 0 # So that if we don't find it, we do nothing
for d in data:
if lx in d and ly in d: # Find the first input with both labels
dx = d[lx][self.factor[i]-self.counter[i]-1::self.factor[i]]
dy = d[ly][self.factor[i]-self.counter[i]-1::self.factor[i]]
self.counter[i] = (self.counter[i]+len(d[lx])) % self.factor[i]
x = np.hstack((self.lines[i].get_xdata(), dx))
y = np.hstack((self.lines[i].get_ydata(), dy))
break
if isinstance(x, int):
break
if self.length and len(x) >= self.length:
# Remove the beginning if the graph is dynamic
x = x[-self.length:]
y = y[-self.length:]
elif len(x) > self.maxpt:
        # Reduce the number of points if we have too many to display
print("[Grapher] Too many points on the graph {} ({}>{})".format(
i, len(x), self.maxpt))
x, y = x[::2], y[::2]
self.factor[i] *= 2
print("[Grapher] Resampling factor is now {}".format(self.factor[i]))
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
self.ax.relim() # Update the window
self.ax.autoscale_view(True, True, True)
self.f.canvas.draw() # Update the graph
self.f.canvas.flush_events()
def finish(self):
plt.close("all")
| gpl-2.0 |
dmitriz/zipline | zipline/utils/tradingcalendar.py | 6 | 11182 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
# If Christmas is a Saturday then 24th, a Friday is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
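# Illustrative sketch: `trading_day` is a CustomBusinessDay offset, so plain
# date arithmetic skips weekends and the holidays computed above. July 4th 2012
# fell on a Wednesday, hence (assuming this module's holiday set):
def _demo_next_trading_day():
    return pd.Timestamp('2012-07-03') + trading_day  # -> Timestamp('2012-07-05')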
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
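# Illustrative usage sketch: sessions open at 9:31 AM Eastern; a day listed in
# `early_closes` (e.g. the day after Thanksgiving 2012) closes at 1 PM instead
# of 4 PM:
def _demo_open_and_close():
    day = pd.Timestamp('2012-11-23', tz='UTC')
    return get_open_and_close(day, early_closes)  # open 9:31, close 13:00 Eastern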
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
notkarol/banjin | experiment/python_word_matching_speed.py | 1 | 4650 | #!/usr/bin/python
# Takes in a dictionary of words
# Verifies that all functions return the same answers
# Generates random hands from the probability of getting tiles from the bunch
# Then prints out how long each function takes to find all matching words
# Generates various hand sizes to see if there's any scaling
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import sys
import timeit
# Naive list way of matching wordbank
def f0_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i][i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop with map/zip over plain lists
def f1_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0:
results.append(w_i)
return results
# Naive way using numpy
def f0_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i,i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if not np.any((hand - wordbank[w_i]) < 0):
results.append(w_i)
return results
# A for loop and some numpy
def f2_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if np.min(hand - wordbank[w_i]) >= 0:
results.append(w_i)
return results
# Vectorized sum and difference
def f3_np(hand, wordbank):
return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0]
# vectorized just using any
def f4_np(hand, wordbank):
return np.where(np.any(wordbank > hand, axis=1) == 0)[0]
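# Illustrative sketch: with 26-entry letter-count vectors, a word is playable
# exactly when no letter of the word exceeds the hand's supply; f4_np expresses
# this as "no row has a strictly positive excess". Toy check:
def _demo_match():
    hand = np.zeros(26)
    hand[0], hand[1] = 2, 1                      # two A's, one B
    bank = np.zeros((2, 26))
    bank[0, 0], bank[1, 1] = 2, 2                # word 0 = "AA", word 1 = "BB"
    return f4_np(hand, bank)                     # -> array([0]): only "AA" fits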
# Prepare a 2D list and a 2D np array of letter frequencies
with open(sys.argv[1]) as f:
words = [x.split()[0] for x in f.readlines()]
wordbank_list = [[0] * 26 for _ in range(len(words))]
wordbank_np = np.zeros((len(words), 26))
for w_i in range(len(words)):
for letter in sorted(words[w_i]):
pos = ord(letter) - 65
wordbank_list[w_i][pos] += 1
wordbank_np[w_i][pos] += 1
# Arrays for keeping track of functions and data-specific wordbanks
hand_sizes = list(range(2, 9))
functions = {'list' : [f0_list, f1_list],
'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]}
wordbanks = {'list' : wordbank_list,
'numpy': wordbank_np}
n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2])
timings = {}
for datatype in functions:
timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter, len(functions[datatype])))
# Verify that our functions give the same answers
for datatype in functions:
for func in functions[datatype]:
print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2], wordbanks[datatype]))
# Time each word
imports = 'from __main__ import functions, wordbanks'
for counter in range(n_iter):
for hand_size in hand_sizes:
# Get a specific hand size
hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2]
while sum(hand) > hand_size:
pos = np.random.randint(sum(hand))
for i in range(len(hand)):
pos -= hand[i]
if pos < 0:
hand[i] -= 1
break
hand = str(hand)
# For this hand go wild
for datatype in functions:
for f_i in range(len(functions[datatype])):
cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype)
timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8)
print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='')
print()
# Save words and timings in case we're doing a long-lasting operation
filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1])
with open(filename, 'wb') as f:
print("Saving", filename)
pickle.dump((words, wordbanks, timings), f)
# Show Results
for datatype in functions:
means = np.mean(timings[datatype], axis=1)
for f_i in range(means.shape[1]):
plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):], label='%s F%i' % (datatype, f_i))
plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5))
plt.xlabel("Hand Size")
plt.ylabel("Execution Time")
plt.title("Word Matching")
plt.show()
| mit |
rhoscanner-team/pcd-plotter | delaunay_example.py | 1 | 1435 | import numpy as np
from scipy.spatial import Delaunay
points = np.random.rand(30, 2) # 30 points in 2-d
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.simplices:  # `simplices` supersedes the deprecated `vertices`
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
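# Each interior edge is shared by two triangles, so the `edges` set inside
# add_edge keeps it from being drawn twice; points[[i, j]] fancy-indexes a
# (2, 2) array holding the segment's two endpoints.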
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')  # the 'hold' kwarg was removed from matplotlib
plt.xlim(-1, 2)
plt.ylim(-1, 2)
# -- the same stuff for the convex hull
edges = set()
edge_points = []
for ia, ib in tri.convex_hull:
add_edge(ia, ib)
lines = LineCollection(edge_points)
plt.figure()
plt.title('Convex hull')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
| gpl-2.0 |
q1ang/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
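# Brute-force reference: compute the full pairwise-distance matrix and keep the
# k smallest entries per query row; the tree-based queries are checked against it.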
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
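# compute_kernel_slow is the quadratic-time reference density that the
# tree-based KDTree.kernel_density results are validated against below.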
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
    dist1, ind1 = kdt1.query(X)  # query returns (distances, indices)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
        dist2, ind2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
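    # broadcasting the zero column against np.arange replicates 0..n_pts-1 per row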
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |